Muhammad Taqi Raza
committed on
Commit
·
ea8870e
1
Parent(s):
fcac2ca
add app files
Browse files
app.py
CHANGED
@@ -3,6 +3,7 @@ import gradio as gr
|
|
3 |
import subprocess
|
4 |
import uuid
|
5 |
import shutil
|
|
|
6 |
from huggingface_hub import snapshot_download
|
7 |
|
8 |
# ----------------------------------------
|
@@ -21,6 +22,15 @@ if not os.path.exists(MODEL_PATH) or len(os.listdir(MODEL_PATH)) == 0:
|
|
21 |
)
|
22 |
print("✅ Download complete.")
|
23 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
24 |
def list_downloaded_files(model_path):
|
25 |
lines = []
|
26 |
for root, dirs, files in os.walk(model_path):
|
@@ -31,8 +41,6 @@ def list_downloaded_files(model_path):
|
|
31 |
for f in sorted(files):
|
32 |
lines.append(f"{subindent}📄 {f}")
|
33 |
return "\n".join(lines)
|
34 |
-
file_tree = list_downloaded_files(MODEL_PATH)
|
35 |
-
gr.Textbox(value=file_tree, label="Downloaded Files Tree", lines=20)
|
36 |
|
37 |
# ----------------------------------------
|
38 |
# Step 2: Setup Directories
|
@@ -52,7 +60,6 @@ def run_inference(video_path, save_format):
|
|
52 |
input_path = os.path.join(UPLOAD_DIR, input_name)
|
53 |
shutil.copy(video_path, input_path)
|
54 |
|
55 |
-
# --- Run inference script ---
|
56 |
cmd = [
|
57 |
"python", INFERENCE_SCRIPT,
|
58 |
"--input_dir", UPLOAD_DIR,
|
@@ -64,10 +71,7 @@ def run_inference(video_path, save_format):
|
|
64 |
|
65 |
try:
|
66 |
inference_result = subprocess.run(
|
67 |
-
cmd,
|
68 |
-
capture_output=True,
|
69 |
-
text=True,
|
70 |
-
check=True
|
71 |
)
|
72 |
print("📄 Inference stdout:\n", inference_result.stdout)
|
73 |
print("⚠️ Inference stderr:\n", inference_result.stderr)
|
@@ -77,7 +81,6 @@ def run_inference(video_path, save_format):
|
|
77 |
print("⚠️ STDERR:\n", e.stderr)
|
78 |
return f"Inference failed:\n{e.stderr}", None
|
79 |
|
80 |
-
# --- Convert .mkv to .mp4 ---
|
81 |
mkv_path = os.path.join(OUTPUT_DIR, input_name).replace(".mp4", ".mkv")
|
82 |
mp4_path = os.path.join(OUTPUT_DIR, input_name)
|
83 |
|
@@ -87,10 +90,7 @@ def run_inference(video_path, save_format):
|
|
87 |
]
|
88 |
try:
|
89 |
convert_result = subprocess.run(
|
90 |
-
convert_cmd,
|
91 |
-
capture_output=True,
|
92 |
-
text=True,
|
93 |
-
check=True
|
94 |
)
|
95 |
print("🔄 FFmpeg stdout:\n", convert_result.stdout)
|
96 |
print("⚠️ FFmpeg stderr:\n", convert_result.stderr)
|
@@ -106,11 +106,32 @@ def run_inference(video_path, save_format):
|
|
106 |
return "Output video not found.", None
|
107 |
|
108 |
# ----------------------------------------
|
109 |
-
# Step 4:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
110 |
# ----------------------------------------
|
111 |
with gr.Blocks() as demo:
|
112 |
-
gr.Markdown("# 🎥 DOVE Video
|
113 |
-
gr.Markdown("⚙️ **
|
114 |
|
115 |
with gr.Row():
|
116 |
input_video = gr.Video(label="Upload input video")
|
@@ -120,7 +141,7 @@ with gr.Blocks() as demo:
|
|
120 |
save_format = gr.Dropdown(
|
121 |
choices=["yuv444p", "yuv420p"],
|
122 |
value="yuv444p",
|
123 |
-
label="Save format
|
124 |
)
|
125 |
|
126 |
run_button = gr.Button("Run Inference")
|
@@ -132,4 +153,19 @@ with gr.Blocks() as demo:
|
|
132 |
outputs=[status, output_video],
|
133 |
)
|
134 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
135 |
demo.launch()
|
|
|
3 |
import subprocess
|
4 |
import uuid
|
5 |
import shutil
|
6 |
+
import psutil
|
7 |
from huggingface_hub import snapshot_download
|
8 |
|
9 |
# ----------------------------------------
|
|
|
22 |
)
|
23 |
print("✅ Download complete.")
|
24 |
|
25 |
+
# Log the on-disk layout of the downloaded model so deployment logs show
# exactly which files snapshot_download produced.
print("\n📂 Directory structure after download:")
for dirpath, _dirnames, filenames in os.walk(MODEL_PATH):
    depth = dirpath.replace(MODEL_PATH, "").count(os.sep)
    prefix = " " * depth
    print(f"{prefix}📁 {os.path.basename(dirpath) or 'DOVE'}/")
    file_prefix = " " * (depth + 1)
    for filename in sorted(filenames):
        print(f"{file_prefix}📄 {filename}")
|
33 |
+
|
34 |
def list_downloaded_files(model_path):
|
35 |
lines = []
|
36 |
for root, dirs, files in os.walk(model_path):
|
|
|
41 |
for f in sorted(files):
|
42 |
lines.append(f"{subindent}📄 {f}")
|
43 |
return "\n".join(lines)
|
|
|
|
|
44 |
|
45 |
# ----------------------------------------
|
46 |
# Step 2: Setup Directories
|
|
|
60 |
input_path = os.path.join(UPLOAD_DIR, input_name)
|
61 |
shutil.copy(video_path, input_path)
|
62 |
|
|
|
63 |
cmd = [
|
64 |
"python", INFERENCE_SCRIPT,
|
65 |
"--input_dir", UPLOAD_DIR,
|
|
|
71 |
|
72 |
try:
|
73 |
inference_result = subprocess.run(
|
74 |
+
cmd, capture_output=True, text=True, check=True
|
|
|
|
|
|
|
75 |
)
|
76 |
print("📄 Inference stdout:\n", inference_result.stdout)
|
77 |
print("⚠️ Inference stderr:\n", inference_result.stderr)
|
|
|
81 |
print("⚠️ STDERR:\n", e.stderr)
|
82 |
return f"Inference failed:\n{e.stderr}", None
|
83 |
|
|
|
84 |
mkv_path = os.path.join(OUTPUT_DIR, input_name).replace(".mp4", ".mkv")
|
85 |
mp4_path = os.path.join(OUTPUT_DIR, input_name)
|
86 |
|
|
|
90 |
]
|
91 |
try:
|
92 |
convert_result = subprocess.run(
|
93 |
+
convert_cmd, capture_output=True, text=True, check=True
|
|
|
|
|
|
|
94 |
)
|
95 |
print("🔄 FFmpeg stdout:\n", convert_result.stdout)
|
96 |
print("⚠️ FFmpeg stderr:\n", convert_result.stderr)
|
|
|
106 |
return "Output video not found.", None
|
107 |
|
108 |
# ----------------------------------------
|
109 |
+
# Step 4: System Resource Functions
|
110 |
+
# ----------------------------------------
|
111 |
+
def get_resources():
    """Return a human-readable summary of current RAM, disk, and CPU usage.

    Returns:
        str: Three-line report — RAM used (GB), disk used / total (GB),
        and CPU utilisation (%).
    """
    ram_used_gb = psutil.virtual_memory().used / (1024 ** 3)
    # Query disk usage once; the original called shutil.disk_usage('/')
    # twice (once for .used, once for .total) for the same snapshot.
    disk = shutil.disk_usage('/')
    disk_used_gb = disk.used / (1024 ** 3)
    disk_total_gb = disk.total / (1024 ** 3)
    # NOTE(review): cpu_percent() with no interval reports usage since the
    # previous call and returns 0.0 on the very first call — confirm this
    # is acceptable for an on-demand status button.
    cpu_pct = psutil.cpu_percent()
    return (
        f"🧠 RAM Used: {ram_used_gb:.2f} GB\n"
        f"📀 Disk Used: {disk_used_gb:.2f} GB / {disk_total_gb:.2f} GB\n"
        f"🖥️ CPU Usage: {cpu_pct:.2f}%"
    )
|
121 |
+
|
122 |
+
def get_gpu_info():
    """Return the raw output of ``nvidia-smi``, or an error message.

    Best-effort probe: any failure (missing binary, non-zero exit status,
    etc.) is reported as a string rather than raised, so the UI button
    never crashes the app.
    """
    try:
        probe = subprocess.run(
            ["nvidia-smi"],
            capture_output=True,
            text=True,
            check=True,
        )
    except Exception as exc:
        return "❌ GPU not available or error:\n" + str(exc)
    return probe.stdout
|
128 |
+
|
129 |
+
# ----------------------------------------
|
130 |
+
# Step 5: Gradio UI
|
131 |
# ----------------------------------------
|
132 |
with gr.Blocks() as demo:
|
133 |
+
gr.Markdown("# 🎥 DOVE Video Super-Resolution & Restoration Demo")
|
134 |
+
gr.Markdown("⚙️ **Tip:** Default save format is `yuv444p`. If playback fails, try `yuv420p`.")
|
135 |
|
136 |
with gr.Row():
|
137 |
input_video = gr.Video(label="Upload input video")
|
|
|
141 |
save_format = gr.Dropdown(
|
142 |
choices=["yuv444p", "yuv420p"],
|
143 |
value="yuv444p",
|
144 |
+
label="Save format"
|
145 |
)
|
146 |
|
147 |
run_button = gr.Button("Run Inference")
|
|
|
153 |
outputs=[status, output_video],
|
154 |
)
|
155 |
|
156 |
+
gr.Markdown("## 🧾 Downloaded Model Files")
|
157 |
+
gr.Textbox(value=list_downloaded_files(MODEL_PATH), label="Model Directory Tree", lines=20)
|
158 |
+
|
159 |
+
gr.Markdown("## 🧠 System Resources")
|
160 |
+
|
161 |
+
with gr.Row():
|
162 |
+
res_btn = gr.Button("Check CPU/RAM/Disk")
|
163 |
+
gpu_btn = gr.Button("Check GPU (if available)")
|
164 |
+
|
165 |
+
sys_info = gr.Textbox(label="CPU, RAM, Disk")
|
166 |
+
gpu_info = gr.Textbox(label="GPU Info", lines=10)
|
167 |
+
|
168 |
+
res_btn.click(fn=get_resources, inputs=[], outputs=sys_info)
|
169 |
+
gpu_btn.click(fn=get_gpu_info, inputs=[], outputs=gpu_info)
|
170 |
+
|
171 |
demo.launch()
|