to solve gs render error
- LHM/models/__pycache__/modeling_human_lrm.cpython-310.pyc +0 -0
- LHM/models/modeling_human_lrm.py +1 -1
- LHM/models/rendering/__pycache__/gs_renderer.cpython-310.pyc +0 -0
- LHM/models/rendering/gs_renderer.py +20 -19
- LHM/runners/infer/__pycache__/human_lrm.cpython-310.pyc +0 -0
- LHM/runners/infer/human_lrm.py +2 -1
- LHM/utils/__pycache__/face_detector.cpython-310.pyc +0 -0
- app.py +84 -6
- engine/SegmentAPI/__pycache__/base.cpython-310.pyc +0 -0
- requirements_lhm.txt +2 -2
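
In short, this commit works around a Gaussian-splatting (GS) render error by replacing the torch.autocast context around the rasterizer call in gs_renderer.py with explicit float32 casts, decorating the renderer's and model's inference entry points with @torch.no_grad(), adding standalone animation_infer and get_bbox helpers to app.py, and commenting out the pinned torch/torchvision wheels in requirements_lhm.txt.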
LHM/models/__pycache__/modeling_human_lrm.cpython-310.pyc CHANGED
Binary files a/LHM/models/__pycache__/modeling_human_lrm.cpython-310.pyc and b/LHM/models/__pycache__/modeling_human_lrm.cpython-310.pyc differ

LHM/models/modeling_human_lrm.py CHANGED
@@ -1092,7 +1092,7 @@ class ModelHumanLRMSapdinoBodyHeadSD3_5(ModelHumanLRMSapdinoBodyHeadSD3):
         print(f"time elpased(forward gs model):{time.time() - start_time}")
         return gs_model_list, query_points, smplx_params['transform_mat_neutral_pose']

-
+    @torch.no_grad()
     def animation_infer(self, gs_model_list, query_points, smplx_params, render_c2ws, render_intrs, render_bg_colors):
         '''Inference code avoid repeat forward.
         '''
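
The single functional change here is applying torch.no_grad() as a decorator to animation_infer, so no autograd graph is built while rendering animation frames. A minimal sketch of the same pattern (the TinyModel class below is illustrative, not from the repo):

    import torch

    class TinyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(4, 4)

        @torch.no_grad()  # autograd is disabled for every call to this method
        def animation_infer(self, x):
            return self.linear(x)

    model = TinyModel()
    out = model.animation_infer(torch.randn(2, 4))
    print(out.requires_grad)  # False: no graph is retained, cutting inference memory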
LHM/models/rendering/__pycache__/gs_renderer.cpython-310.pyc CHANGED
Binary files a/LHM/models/rendering/__pycache__/gs_renderer.cpython-310.pyc and b/LHM/models/rendering/__pycache__/gs_renderer.cpython-310.pyc differ

LHM/models/rendering/gs_renderer.py CHANGED
@@ -818,6 +818,7 @@ class GS3DRenderer(nn.Module):
     def hyper_step(self, step):
         self.gs_net.hyper_step(step)

+    @torch.no_grad()
     def forward_single_view(
         self,
         gs: GaussianModel,
@@ -884,17 +885,17 @@ class GS3DRenderer(nn.Module):

         # Rasterize visible Gaussians to image, obtain their radii (on screen).
         # NOTE that dadong tries to regress rgb not shs
-        with torch.autocast(device_type=self.device.type, dtype=torch.float32):
-            rendered_image, radii, rendered_depth, rendered_alpha = rasterizer(
-                means3D=means3D.float(),
-                means2D=means2D.float(),
-                shs=shs,
-                colors_precomp=colors_precomp,
-                opacities=opacity.float(),
-                scales=scales.float(),
-                rotations=rotations.float(),
-                cov3D_precomp=cov3D_precomp,
-            )
+        # with torch.autocast(device_type=self.device.type, dtype=torch.float32):
+        rendered_image, radii, rendered_depth, rendered_alpha = rasterizer(
+            means3D=means3D.float(),
+            means2D=means2D.float(),
+            shs=shs,
+            colors_precomp=colors_precomp,
+            opacities=opacity.float(),
+            scales=scales.float(),
+            rotations=rotations.float(),
+            cov3D_precomp=cov3D_precomp,
+        )

         ret = {
             "comp_rgb": rendered_image.permute(1, 2, 0),  # [H, W, 3]
@@ -934,7 +935,7 @@ class GS3DRenderer(nn.Module):
         # ret["comp_mask"] = rendered_mask.permute(1, 2, 0)

         return ret
-
+    @torch.no_grad()
     def animate_gs_model(
         self, gs_attr: GaussianAppOutput, query_points, smplx_data, debug=False
     ):
@@ -1063,7 +1064,7 @@ class GS3DRenderer(nn.Module):
             gs_list.append(gs_copy)

         return gs_list, cano_gs_list
-
+    @torch.no_grad()
     def forward_gs_attr(self, x, query_points, smplx_data, debug=False, x_fine=None):
         """
         x: [N, C] Float[Tensor, "Np Cp"],
@@ -1080,7 +1081,7 @@ class GS3DRenderer(nn.Module):
         gs_attr: GaussianAppOutput = self.gs_net(x, query_points, x_fine)

         return gs_attr
-
+    @torch.no_grad()
     def get_query_points(self, smplx_data, device):
         with torch.no_grad():
             with torch.autocast(device_type=device.type, dtype=torch.float32):
@@ -1112,7 +1113,7 @@ class GS3DRenderer(nn.Module):
             pcl_embed.to(dtype=latent_feat.dtype), latent_feat, extra_info
         )
         return gs_feats
-
+    @torch.no_grad()
     def query_latent_feat(
         self,
         positions: Float[Tensor, "*B N1 3"],
@@ -1137,7 +1138,7 @@ class GS3DRenderer(nn.Module):
         )

         return gs_feats, positions, smplx_data
-
+    @torch.no_grad()
     def forward_single_batch(
         self,
         gs_list: list[GaussianModel],
@@ -1286,7 +1287,7 @@ class GS3DRenderer(nn.Module):
             :, vidx : vidx + 1
         ]  # e.g. body_pose: [1, N_v, 21, 3] -> [1, 1, 21, 3]
         return smpl_data_single_view
-
+    @torch.no_grad()
     def forward_gs(
         self,
         gs_hidden_features: Float[Tensor, "B Np Cp"],
@@ -1321,7 +1322,7 @@ class GS3DRenderer(nn.Module):
             gs_attr_list.append(gs_attr)

         return gs_attr_list, query_points, smplx_data
-
+    @torch.no_grad()
     def forward_animate_gs(
         self,
         gs_attr_list,
@@ -1394,7 +1395,7 @@ class GS3DRenderer(nn.Module):
         )  # [B, NV, H, W, 3] -> [B, NV, 1, H, W]

         return out
-
+    @torch.no_grad()
     def forward(
         self,
         gs_hidden_features: Float[Tensor, "B Np Cp"],
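
Besides decorating each inference method with @torch.no_grad(), the key change is in forward_single_view: the torch.autocast(..., dtype=torch.float32) context around the rasterizer call is commented out in favor of explicit .float() casts on each tensor argument. CUDA rasterization extensions generally require float32 inputs, and when the surrounding network runs under mixed precision they can receive half-precision tensors, which is a plausible source of the render error this commit addresses. A toy sketch of the pattern, with a hypothetical rasterize function standing in for the real CUDA extension:

    import torch

    def rasterize(means3d: torch.Tensor, opacities: torch.Tensor) -> torch.Tensor:
        # Stand-in for a CUDA kernel that only accepts float32 inputs.
        assert means3d.dtype == torch.float32, "rasterizer expects float32"
        assert opacities.dtype == torch.float32, "rasterizer expects float32"
        return means3d.sum(dim=-1, keepdim=True) * opacities

    # Upstream layers may produce half-precision tensors under mixed precision...
    means3d = torch.randn(1024, 3, dtype=torch.float16)
    opacities = torch.rand(1024, 1, dtype=torch.float16)

    # ...so cast explicitly at the call boundary instead of relying on an
    # autocast block, which custom extensions do not necessarily respect.
    image = rasterize(means3d.float(), opacities.float())
    print(image.shape, image.dtype)  # torch.Size([1024, 1]) torch.float32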
LHM/runners/infer/__pycache__/human_lrm.cpython-310.pyc CHANGED
Binary files a/LHM/runners/infer/__pycache__/human_lrm.cpython-310.pyc and b/LHM/runners/infer/__pycache__/human_lrm.cpython-310.pyc differ

LHM/runners/infer/human_lrm.py CHANGED
@@ -6,6 +6,7 @@
 # @Function : Inference code for human_lrm model

 import argparse
+from omegaconf import OmegaConf
 import os
 import pdb
 import time
@@ -15,7 +16,7 @@ import numpy as np
 import spaces
 import torch
 from accelerate.logging import get_logger
-
+
 from PIL import Image
 from tqdm.auto import tqdm
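
The only additions here are the OmegaConf import and a whitespace touch-up on the blank line after the logger import. For context, a minimal sketch of the usual OmegaConf pattern for inference configs (the keys below are made up for illustration):

    from omegaconf import OmegaConf

    # Build a config and merge dotlist-style overrides on top of it.
    cfg = OmegaConf.create({"model_name": "human_lrm", "render": {"width": 512}})
    cfg = OmegaConf.merge(cfg, OmegaConf.from_dotlist(["render.width=1024"]))
    print(cfg.render.width)  # 1024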
LHM/utils/__pycache__/face_detector.cpython-310.pyc CHANGED
Binary files a/LHM/utils/__pycache__/face_detector.cpython-310.pyc and b/LHM/utils/__pycache__/face_detector.cpython-310.pyc differ

app.py CHANGED
@@ -14,14 +14,21 @@


 import os
+os.system("rm -rf /data-nvme/zerogpu-offload/")
+import cv2
+import time
 from PIL import Image
 import numpy as np
 import gradio as gr
 import base64
 import spaces
+import torch
+torch._dynamo.config.disable = True
 import subprocess
 import os
-
+import argparse
+from omegaconf import OmegaConf
+from rembg import remove
 from engine.pose_estimation.pose_estimator import PoseEstimator
 from LHM.utils.face_detector import VGGHeadDetector
 from LHM.utils.hf_hub import wrap_model_hub
@@ -31,6 +38,29 @@ from LHM.runners.infer.utils import (
     prepare_motion_seqs,
     resize_image_keepaspect_np,
 )
+from engine.SegmentAPI.base import Bbox
+
+def get_bbox(mask):
+    height, width = mask.shape
+    pha = mask / 255.0
+    pha[pha < 0.5] = 0.0
+    pha[pha >= 0.5] = 1.0
+
+    # obtain bbox
+    _h, _w = np.where(pha == 1)
+
+    whwh = [
+        _w.min().item(),
+        _h.min().item(),
+        _w.max().item(),
+        _h.max().item(),
+    ]
+
+    box = Bbox(whwh)
+
+    # scale box to 1.05
+    scale_box = box.scale(1.1, width=width, height=height)
+    return scale_box

 def infer_preprocess_image(
     rgb_path,
@@ -244,6 +274,55 @@ def launch_env_not_compile_with_cuda():
     os.system("pip install --no-index --no-cache-dir pytorch3d -f https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/py310_cu121_pyt251/download.html")


+def animation_infer(renderer, gs_model_list, query_points, smplx_params, render_c2ws, render_intrs, render_bg_colors):
+    '''Inference code avoid repeat forward.
+    '''
+    render_h, render_w = int(render_intrs[0, 0, 1, 2] * 2), int(
+        render_intrs[0, 0, 0, 2] * 2
+    )
+    # render target views
+    render_res_list = []
+    num_views = render_c2ws.shape[1]
+    start_time = time.time()
+
+    # render target views
+    render_res_list = []
+
+    for view_idx in range(num_views):
+        render_res = renderer.forward_animate_gs(
+            gs_model_list,
+            query_points,
+            renderer.get_single_view_smpl_data(smplx_params, view_idx),
+            render_c2ws[:, view_idx : view_idx + 1],
+            render_intrs[:, view_idx : view_idx + 1],
+            render_h,
+            render_w,
+            render_bg_colors[:, view_idx : view_idx + 1],
+        )
+        render_res_list.append(render_res)
+    print(
+        f"time elpased(animate gs model per frame):{(time.time() - start_time)/num_views}"
+    )
+
+    out = defaultdict(list)
+    for res in render_res_list:
+        for k, v in res.items():
+            if isinstance(v[0], torch.Tensor):
+                out[k].append(v.detach().cpu())
+            else:
+                out[k].append(v)
+    for k, v in out.items():
+        # print(f"out key:{k}")
+        if isinstance(v[0], torch.Tensor):
+            out[k] = torch.concat(v, dim=1)
+            if k in ["comp_rgb", "comp_mask", "comp_depth"]:
+                out[k] = out[k][0].permute(
+                    0, 2, 3, 1
+                )  # [1, Nv, 3, H, W] -> [Nv, 3, H, W] - > [Nv, H, W, 3]
+        else:
+            out[k] = v
+    return out
+
 def assert_input_image(input_image):
     if input_image is None:
         raise gr.Error("No image selected or uploaded!")
@@ -273,7 +352,7 @@ def get_image_base64(path):
     return f"data:image/png;base64,{encoded_string}"


-def demo_lhm(pose_estimator, face_detector, lhm_model, cfg):
+def demo_lhm(pose_estimator, face_detector, lhm, cfg):

     @spaces.GPU
     def core_fn(image: str, video_params, working_dir):
@@ -311,6 +390,7 @@ def demo_lhm(pose_estimator, face_detector, lhm_model, cfg):

         print(image_raw, motion_seqs_dir, dump_image_dir, dump_video_path)

+        dump_tmp_dir = dump_image_dir

         shape_pose = pose_estimator(image_raw)
         assert shape_pose.is_full_body, f"The input image is illegal, {shape_pose.msg}"
@@ -458,7 +538,7 @@ def demo_lhm(pose_estimator, face_detector, lhm_model, cfg):
                 :, batch_i : batch_i + batch_size
             ].to(device)

-            res = lhm_model.animation_infer(gs_model_list, query_points, batch_smplx_params,
+            res = lhm.animation_infer(gs_model_list, query_points, batch_smplx_params,
                 render_c2ws=motion_seq["render_c2ws"][
                     :, batch_i : batch_i + batch_size
                 ].to(device),
@@ -511,7 +591,6 @@ def demo_lhm(pose_estimator, face_detector, lhm_model, cfg):
             verbose=True,
         )

-
         # self.infer_single(
         #     image_path,
         #     motion_seqs_dir=motion_seqs_dir,
@@ -525,8 +604,6 @@ def demo_lhm(pose_estimator, face_detector, lhm_model, cfg):
         #     shape_param=shape_pose.beta,
         #     )

-
-
         # status = spaces.GPU(infer_impl(
         #     gradio_demo_image=image_raw,
         #     gradio_motion_file=smplx_params_dir,
@@ -695,6 +772,7 @@ def launch_gradio_app():
 if __name__ == '__main__':
     # launch_pretrained()
     # launch_env_not_compile_with_cuda()
+    # os.system("rm -rf /data-nvme/zerogpu-offload/")
     launch_gradio_app()

     # import gradio as gr
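
Two helpers are added to app.py above. get_bbox thresholds an alpha mask at 0.5, takes the min/max foreground coordinates, and enlarges the resulting Bbox by 10% via scale(1.1, ...) (the "# scale box to 1.05" comment is stale). A usage sketch with a synthetic mask; the Bbox semantics are assumed from the diff rather than from engine.SegmentAPI.base itself:

    import numpy as np

    # Synthetic 256x256 uint8 mask with a rectangular foreground region.
    mask = np.zeros((256, 256), dtype=np.uint8)
    mask[64:192, 80:200] = 255

    pha = mask / 255.0
    pha[pha < 0.5] = 0.0
    pha[pha >= 0.5] = 1.0
    _h, _w = np.where(pha == 1)  # row (y) and column (x) indices of foreground
    whwh = [_w.min().item(), _h.min().item(), _w.max().item(), _h.max().item()]
    print(whwh)  # [80, 64, 199, 191] -> x_min, y_min, x_max, y_max
    # get_bbox then wraps this as Bbox(whwh) and pads it with .scale(1.1, ...)
    # so the crop is slightly larger than the tight foreground box.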
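The module-level animation_infer added to app.py mirrors the model method: it renders each target view with forward_animate_gs, then merges the per-view result dicts by concatenating tensors along the view dimension. A minimal sketch of that accumulation step with dummy tensors (shapes follow the [1, Nv, 3, H, W] convention noted in the diff comments):

    import torch
    from collections import defaultdict

    # Pretend each per-view render returns a dict with a [1, 1, 3, H, W] tensor.
    render_res_list = [{"comp_rgb": torch.rand(1, 1, 3, 8, 8)} for _ in range(4)]

    out = defaultdict(list)
    for res in render_res_list:
        for k, v in res.items():
            out[k].append(v.detach().cpu() if isinstance(v, torch.Tensor) else v)

    for k, v in out.items():
        if isinstance(v[0], torch.Tensor):
            out[k] = torch.concat(v, dim=1)  # stack views: [1, Nv, 3, H, W]
            if k in ["comp_rgb", "comp_mask", "comp_depth"]:
                # [1, Nv, 3, H, W] -> [Nv, 3, H, W] -> [Nv, H, W, 3]
                out[k] = out[k][0].permute(0, 2, 3, 1)

    print(out["comp_rgb"].shape)  # torch.Size([4, 8, 8, 3])
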
engine/SegmentAPI/__pycache__/base.cpython-310.pyc CHANGED
Binary files a/engine/SegmentAPI/__pycache__/base.cpython-310.pyc and b/engine/SegmentAPI/__pycache__/base.cpython-310.pyc differ

requirements_lhm.txt CHANGED
@@ -37,8 +37,8 @@ setuptools==74.0.0
 taming_transformers_rom1504==0.0.6
 timm==1.0.15

-https://download.pytorch.org/whl/cu121/torch-2.5.1%2Bcu121-cp310-cp310-linux_x86_64.whl#sha256=92af92c569de5da937dd1afb45ecfdd598ec1254cf2e49e3d698cb24d71aae14
-https://download.pytorch.org/whl/cu121/torchvision-0.20.1%2Bcu121-cp310-cp310-linux_x86_64.whl#sha256=304937b82c933d5155bd04d771f4b187273f67a76050bb4276b521f7e9b4c4e7
+# https://download.pytorch.org/whl/cu121/torch-2.5.1%2Bcu121-cp310-cp310-linux_x86_64.whl#sha256=92af92c569de5da937dd1afb45ecfdd598ec1254cf2e49e3d698cb24d71aae14
+# https://download.pytorch.org/whl/cu121/torchvision-0.20.1%2Bcu121-cp310-cp310-linux_x86_64.whl#sha256=304937b82c933d5155bd04d771f4b187273f67a76050bb4276b521f7e9b4c4e7
 # https://download.pytorch.org/whl/cu121/xformers-0.0.29.post1-cp310-cp310-manylinux_2_28_x86_64.whl#sha256=e213ff8123e20602bd486739ffee4013338b02f9d2e0e4635a2912750854fdbe

 tqdm==4.66.4
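
With the pinned cu121 wheels commented out, torch and torchvision are presumably expected to come from the Space's base image rather than from this requirements file. A quick sanity check for whichever build is actually installed:

    import torch
    import torchvision

    # Confirm the environment-provided builds before relying on CUDA kernels.
    print(torch.__version__)          # e.g. 2.5.1+cu121
    print(torchvision.__version__)    # e.g. 0.20.1+cu121
    print(torch.version.cuda)         # CUDA toolkit the wheel targets
    print(torch.cuda.is_available())  # True once a GPU is attached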