pookiefoof committed on
Commit
b6d803b
·
1 Parent(s): 7c93add
2D_Stage/tuneavideo/pipelines/__pycache__/pipeline_tuneavideo.cpython-310.pyc CHANGED
Binary files a/2D_Stage/tuneavideo/pipelines/__pycache__/pipeline_tuneavideo.cpython-310.pyc and b/2D_Stage/tuneavideo/pipelines/__pycache__/pipeline_tuneavideo.cpython-310.pyc differ
 
3D_Stage/lrm/models/__pycache__/camera.cpython-310.pyc CHANGED
Binary files a/3D_Stage/lrm/models/__pycache__/camera.cpython-310.pyc and b/3D_Stage/lrm/models/__pycache__/camera.cpython-310.pyc differ
 
3D_Stage/lrm/models/camera.py CHANGED
@@ -29,5 +29,7 @@ class LinearCameraEmbedder(BaseModule):
29
  cond_tensors.append(cond.view(*cond.shape[:2], -1))
30
  cond_tensor = torch.cat(cond_tensors, dim=-1)
31
  assert cond_tensor.shape[-1] == self.cfg.in_channels
 
 
32
  embedding = self.linear(cond_tensor)
33
  return embedding
 
29
  cond_tensors.append(cond.view(*cond.shape[:2], -1))
30
  cond_tensor = torch.cat(cond_tensors, dim=-1)
31
  assert cond_tensor.shape[-1] == self.cfg.in_channels
32
+ #self.linear = self.linear.to("cuda")
33
+ #print(self.linear.weight.device, cond_tensor.device)
34
  embedding = self.linear(cond_tensor)
35
  return embedding
3D_Stage/lrm/models/exporters/__pycache__/mesh_exporter.cpython-310.pyc CHANGED
Binary files a/3D_Stage/lrm/models/exporters/__pycache__/mesh_exporter.cpython-310.pyc and b/3D_Stage/lrm/models/exporters/__pycache__/mesh_exporter.cpython-310.pyc differ
 
3D_Stage/lrm/models/exporters/mesh_exporter.py CHANGED
@@ -101,6 +101,7 @@ class MeshExporter(Exporter):
101
  def get_texture_maps(
102
  self, scene_code: torch.Tensor, mesh: Mesh
103
  ) -> Dict[str, torch.Tensor]:
 
104
  assert mesh.has_uv
105
  # clip space transform
106
  uv_clip = mesh.v_tex * 2.0 - 1.0
 
101
  def get_texture_maps(
102
  self, scene_code: torch.Tensor, mesh: Mesh
103
  ) -> Dict[str, torch.Tensor]:
104
+ #self.ctx = NVDiffRasterizerContext(self.cfg.context_type, self.device)
105
  assert mesh.has_uv
106
  # clip space transform
107
  uv_clip = mesh.v_tex * 2.0 - 1.0
3D_Stage/lrm/models/renderers/__pycache__/triplane_dmtet.cpython-310.pyc CHANGED
Binary files a/3D_Stage/lrm/models/renderers/__pycache__/triplane_dmtet.cpython-310.pyc and b/3D_Stage/lrm/models/renderers/__pycache__/triplane_dmtet.cpython-310.pyc differ
 
3D_Stage/lrm/models/renderers/triplane_dmtet.py CHANGED
@@ -44,6 +44,8 @@ class TriplaneDMTetRenderer(BaseRenderer):
44
  assert self.cfg.feature_reduction in ["concat", "mean"]
45
 
46
  self.ctx = NVDiffRasterizerContext(self.cfg.context_type, self.device)
 
 
47
  self.isosurface_helper = MarchingTetrahedraHelper(
48
  self.cfg.isosurface_resolution,
49
  os.path.join(self.cfg.tet_dir, f"{self.cfg.isosurface_resolution}_tets.npz"),
@@ -130,6 +132,7 @@ class TriplaneDMTetRenderer(BaseRenderer):
130
  background_color: Optional[Float[Tensor, "3"]],
131
  extra_sdf_query: Any = None,
132
  ) -> Dict[str, Tensor]:
 
133
  Nv = mvp_mtx.shape[0]
134
 
135
  out = {}
 
44
  assert self.cfg.feature_reduction in ["concat", "mean"]
45
 
46
  self.ctx = NVDiffRasterizerContext(self.cfg.context_type, self.device)
47
+ #self.ctx = None
48
+
49
  self.isosurface_helper = MarchingTetrahedraHelper(
50
  self.cfg.isosurface_resolution,
51
  os.path.join(self.cfg.tet_dir, f"{self.cfg.isosurface_resolution}_tets.npz"),
 
132
  background_color: Optional[Float[Tensor, "3"]],
133
  extra_sdf_query: Any = None,
134
  ) -> Dict[str, Tensor]:
135
+ #self.ctx = NVDiffRasterizerContext(self.cfg.context_type, self.device)
136
  Nv = mvp_mtx.shape[0]
137
 
138
  out = {}
3D_Stage/lrm/systems/__pycache__/multiview_lrm.cpython-310.pyc CHANGED
Binary files a/3D_Stage/lrm/systems/__pycache__/multiview_lrm.cpython-310.pyc and b/3D_Stage/lrm/systems/__pycache__/multiview_lrm.cpython-310.pyc differ
 
3D_Stage/lrm/systems/multiview_lrm.py CHANGED
@@ -62,19 +62,21 @@ class MultiviewLRM(BaseSystem):
62
 
63
  def configure(self):
64
  super().configure()
 
65
  self.image_tokenizer = lrm.find(self.cfg.image_tokenizer_cls)(
66
  self.cfg.image_tokenizer
67
- )
68
  if self.cfg.image_tokenizer.modulation:
69
  self.camera_embedder = lrm.find(self.cfg.camera_embedder_cls)(
70
  self.cfg.camera_embedder
71
- )
72
- self.tokenizer = lrm.find(self.cfg.tokenizer_cls)(self.cfg.tokenizer)
 
73
  self.backbone = lrm.find(self.cfg.backbone_cls)(self.cfg.backbone)
74
  self.post_processor = lrm.find(self.cfg.post_processor_cls)(
75
  self.cfg.post_processor
76
  )
77
- self.decoder = lrm.find(self.cfg.decoder_cls)(self.cfg.decoder)
78
  self.material = lrm.find(self.cfg.material_cls)(self.cfg.material)
79
  self.background = lrm.find(self.cfg.background_cls)(self.cfg.background)
80
  self.renderer = lrm.find(self.cfg.renderer_cls)(
 
62
 
63
  def configure(self):
64
  super().configure()
65
+ device = torch.device("cuda")
66
  self.image_tokenizer = lrm.find(self.cfg.image_tokenizer_cls)(
67
  self.cfg.image_tokenizer
68
+ ).to(device)
69
  if self.cfg.image_tokenizer.modulation:
70
  self.camera_embedder = lrm.find(self.cfg.camera_embedder_cls)(
71
  self.cfg.camera_embedder
72
+ ).to(device)
73
+ #print(device, self.camera_embedder.linear.weight.device)
74
+ self.tokenizer = lrm.find(self.cfg.tokenizer_cls)(self.cfg.tokenizer).to(device)
75
  self.backbone = lrm.find(self.cfg.backbone_cls)(self.cfg.backbone)
76
  self.post_processor = lrm.find(self.cfg.post_processor_cls)(
77
  self.cfg.post_processor
78
  )
79
+ self.decoder = lrm.find(self.cfg.decoder_cls)(self.cfg.decoder).to(device)
80
  self.material = lrm.find(self.cfg.material_cls)(self.cfg.material)
81
  self.background = lrm.find(self.cfg.background_cls)(self.cfg.background)
82
  self.renderer = lrm.find(self.cfg.renderer_cls)(
3D_Stage/lrm/utils/__pycache__/rasterize.cpython-310.pyc CHANGED
Binary files a/3D_Stage/lrm/utils/__pycache__/rasterize.cpython-310.pyc and b/3D_Stage/lrm/utils/__pycache__/rasterize.cpython-310.pyc differ
 
3D_Stage/lrm/utils/rasterize.py CHANGED
@@ -7,8 +7,8 @@ from .typing import *
7
  class NVDiffRasterizerContext:
8
  def __init__(self, context_type: str, device: torch.device) -> None:
9
  self.device = device
10
- self.ctx = None
11
- #self.ctx = self.initialize_context(context_type, device)
12
 
13
  def initialize_context(
14
  self, context_type: str, device: torch.device
 
7
  class NVDiffRasterizerContext:
8
  def __init__(self, context_type: str, device: torch.device) -> None:
9
  self.device = device
10
+ #self.ctx = None
11
+ self.ctx = self.initialize_context(context_type, device)
12
 
13
  def initialize_context(
14
  self, context_type: str, device: torch.device
app.py CHANGED
@@ -309,7 +309,7 @@ class Inference3D_API:
309
  def __init__(self, device="cuda"):
310
  self.cfg = load_config("3D_Stage/configs/infer.yaml", makedirs=False)
311
  print("Loading system")
312
- self.device = device
313
  self.cfg.system.weights = self.cfg.system.weights.replace("./", "./3D_Stage/")
314
  self.cfg.system.image_tokenizer.pretrained_model_name_or_path = \
315
  self.cfg.system.image_tokenizer.pretrained_model_name_or_path.replace("./", "./3D_Stage/")
@@ -318,8 +318,9 @@ class Inference3D_API:
318
  self.system = lrm.find(self.cfg.system_cls)(self.cfg.system).to(self.device)
319
  self.system.eval()
320
 
321
- @spaces.GPU
322
  def process_images(self, img_input0, img_input1, img_input2, img_input3, back_proj, smooth_iter):
 
323
  meta = json.load(open("./3D_Stage/material/meta.json"))
324
  c2w_cond = [np.array(loc["transform_matrix"]) for loc in meta["locations"]]
325
  c2w_cond = torch.from_numpy(np.stack(c2w_cond, axis=0)).float()[None].to(self.device)
 
309
  def __init__(self, device="cuda"):
310
  self.cfg = load_config("3D_Stage/configs/infer.yaml", makedirs=False)
311
  print("Loading system")
312
+ self.device = torch.device(device)
313
  self.cfg.system.weights = self.cfg.system.weights.replace("./", "./3D_Stage/")
314
  self.cfg.system.image_tokenizer.pretrained_model_name_or_path = \
315
  self.cfg.system.image_tokenizer.pretrained_model_name_or_path.replace("./", "./3D_Stage/")
 
318
  self.system = lrm.find(self.cfg.system_cls)(self.cfg.system).to(self.device)
319
  self.system.eval()
320
 
321
+ @spaces.GPU(duration=120)
322
  def process_images(self, img_input0, img_input1, img_input2, img_input3, back_proj, smooth_iter):
323
+ #self.system = self.system.to("cuda")
324
  meta = json.load(open("./3D_Stage/material/meta.json"))
325
  c2w_cond = [np.array(loc["transform_matrix"]) for loc in meta["locations"]]
326
  c2w_cond = torch.from_numpy(np.stack(c2w_cond, axis=0)).float()[None].to(self.device)
input.png CHANGED
input_3D.png ADDED