Update app.py
app.py CHANGED
@@ -11,8 +11,6 @@ from torchvision import transforms
 import random
 import imageio
 from controlnet_aux import CannyDetector
-# from image_gen_aux import DepthPreprocessor
-# import mediapipe as mp
 from PIL import Image
 import cv2
 
@@ -54,10 +52,6 @@ pipeline.load_lora_weights(
 )
 pipeline.set_adapters([CONTROL_LORAS["canny"]["adapter_name"]], adapter_weights=[1.0])
 
-# Initialize MediaPipe pose estimation
-# mp_drawing = mp.solutions.drawing_utils
-# mp_drawing_styles = mp.solutions.drawing_styles
-# mp_pose = mp.solutions.pose
 
 canny_processor = CannyDetector()
 
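The hunk above keeps only the canny control LoRA wiring (the `pipeline.load_lora_weights(` call named in the hunk header and the `set_adapters` line). For reference, here is a minimal, generic sketch of that diffusers pattern, loading a LoRA under an adapter name and then activating it. The model id, LoRA repository, weight file, and adapter name below are placeholders, not the Space's actual CONTROL_LORAS configuration.

# Generic sketch of the load_lora_weights / set_adapters pattern kept above.
# All identifiers are placeholders; the Space's real pipeline and repos differ.
import torch
from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained(
    "some-org/some-video-model",           # placeholder base model (must support PEFT LoRA loading)
    torch_dtype=torch.bfloat16,
)
pipeline.load_lora_weights(
    "some-org/canny-control-lora",         # placeholder LoRA repository
    weight_name="canny_lora.safetensors",  # placeholder weight file
    adapter_name="canny_lora",             # stands in for CONTROL_LORAS["canny"]["adapter_name"]
)
# Activate just the canny adapter at full strength, mirroring the kept line above.
pipeline.set_adapters(["canny_lora"], adapter_weights=[1.0])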
@@ -126,51 +120,6 @@ def process_video_for_canny(video, width, height):
 
     return canny_video
 
-@spaces.GPU()
-def process_video_for_pose(video):
-    """
-    Process video for pose control using MediaPipe pose estimation.
-    Returns video frames with pose landmarks drawn on black background.
-    """
-    print("Processing video for pose control...")
-    pose_video = []
-
-    with mp_pose.Pose(
-        static_image_mode=True,
-        model_complexity=1,
-        enable_segmentation=False,
-        min_detection_confidence=0.5,
-        min_tracking_confidence=0.5
-    ) as pose:
-
-        for frame in video:
-            # Convert PIL image to numpy array
-            frame_np = np.array(frame)
-
-            # Convert RGB to BGR for MediaPipe
-            frame_bgr = cv2.cvtColor(frame_np, cv2.COLOR_RGB2BGR)
-
-            # Process the frame
-            results = pose.process(frame_bgr)
-
-            # Create black background with same dimensions
-            pose_frame = np.zeros_like(frame_np)
-
-            # Draw pose landmarks if detected
-            if results.pose_landmarks:
-                mp_drawing.draw_landmarks(
-                    pose_frame,
-                    results.pose_landmarks,
-                    mp_pose.POSE_CONNECTIONS,
-                    landmark_drawing_spec=mp_drawing_styles.get_default_pose_landmarks_style()
-                )
-
-            # Convert back to PIL Image
-            pose_pil = Image.fromarray(pose_frame)
-            pose_video.append(pose_pil)
-
-    return pose_video
-
 def process_input_video(reference_video, width, height):
     """
     Process the input video for canny edges and return both processed video and preview.
@@ -203,10 +152,6 @@ def process_video_for_control(reference_video, control_type, width, height):
     if control_type == "canny":
         # This should not be called for canny since it's pre-processed
         processed_video = process_video_for_canny(video, width, height)
-    elif control_type == "depth":
-        processed_video = process_video_for_depth(video)
-    elif control_type == "pose":
-        processed_video = process_video_for_pose(video)
     else:
         processed_video = video
 
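After this change only the canny path remains in process_video_for_control. The body of process_video_for_canny sits outside the hunks shown, so the following is only a per-frame sketch of how it plausibly uses the canny_processor instance created above; the threshold values, the array fallback, and the final resize to (width, height) are assumptions, not the Space's actual code.

# Sketch of per-frame canny preprocessing with controlnet_aux; thresholds,
# the numpy fallback, and the resize step are illustrative assumptions.
from controlnet_aux import CannyDetector
from PIL import Image

canny_processor = CannyDetector()

def sketch_process_video_for_canny(video, width, height, low=100, high=200):
    canny_video = []
    for frame in video:                     # video: iterable of PIL.Image frames
        edges = canny_processor(frame, low_threshold=low, high_threshold=high)
        if not isinstance(edges, Image.Image):
            edges = Image.fromarray(edges)  # older controlnet_aux versions return a numpy array
        canny_video.append(edges.resize((width, height)))
    return canny_video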