Spaces:
Running
on
Zero
Running
on
Zero
xinjie.wang
committed on
Commit
·
cbafcf5
1
Parent(s):
d8efb25
update
Browse files- app.py +1 -1
- asset3d_gen/models/delight_model.py +2 -2
- asset3d_gen/models/gs_model.py +1 -0
- asset3d_gen/models/sr_model.py +1 -1
- asset3d_gen/models/text_model.py +3 -3
- asset3d_gen/models/texture_model.py +1 -1
- asset3d_gen/validators/urdf_convertor.py +6 -4
- common.py +1 -1
- requirements.txt +0 -4
app.py
CHANGED
@@ -218,7 +218,7 @@ with gr.Blocks(
|
|
218 |
height=300,
|
219 |
)
|
220 |
model_output_gs = gr.Model3D(
|
221 |
-
label="Gaussian Representation", height=300, interactive=False
|
222 |
)
|
223 |
aligned_gs = gr.Textbox(visible=False)
|
224 |
with gr.Row():
|
|
|
218 |
height=300,
|
219 |
)
|
220 |
model_output_gs = gr.Model3D(
|
221 |
+
label="Gaussian Representation", height=300, interactive=False
|
222 |
)
|
223 |
aligned_gs = gr.Textbox(visible=False)
|
224 |
with gr.Row():
|
asset3d_gen/models/delight_model.py
CHANGED
@@ -57,8 +57,8 @@ class DelightingModel(object):
|
|
57 |
pipeline.set_progress_bar_config(disable=True)
|
58 |
|
59 |
pipeline.to(self.device, torch.float16)
|
60 |
-
|
61 |
-
|
62 |
self.pipeline = pipeline
|
63 |
|
64 |
def recenter_image(
|
|
|
57 |
pipeline.set_progress_bar_config(disable=True)
|
58 |
|
59 |
pipeline.to(self.device, torch.float16)
|
60 |
+
pipeline.enable_model_cpu_offload()
|
61 |
+
pipeline.enable_xformers_memory_efficient_attention()
|
62 |
self.pipeline = pipeline
|
63 |
|
64 |
def recenter_image(
|
asset3d_gen/models/gs_model.py
CHANGED
@@ -254,6 +254,7 @@ class GaussianBase:
|
|
254 |
opacities = numpy_data["_opacities"]
|
255 |
sh0 = numpy_data["_features_dc"]
|
256 |
shN = numpy_data.get("_features_rest", np.zeros((means.shape[0], 0)))
|
|
|
257 |
|
258 |
# Create a mask to identify rows with NaN or Inf in any of the numpy_data arrays # noqa
|
259 |
if enable_mask:
|
|
|
254 |
opacities = numpy_data["_opacities"]
|
255 |
sh0 = numpy_data["_features_dc"]
|
256 |
shN = numpy_data.get("_features_rest", np.zeros((means.shape[0], 0)))
|
257 |
+
shN = shN.reshape(means.shape[0], -1)
|
258 |
|
259 |
# Create a mask to identify rows with NaN or Inf in any of the numpy_data arrays # noqa
|
260 |
if enable_mask:
|
asset3d_gen/models/sr_model.py
CHANGED
@@ -33,7 +33,7 @@ class ImageStableSR:
|
|
33 |
torch_dtype=torch.float16,
|
34 |
).to(device)
|
35 |
self.up_pipeline_x4.set_progress_bar_config(disable=True)
|
36 |
-
|
37 |
|
38 |
def __call__(
|
39 |
self,
|
|
|
33 |
torch_dtype=torch.float16,
|
34 |
).to(device)
|
35 |
self.up_pipeline_x4.set_progress_bar_config(disable=True)
|
36 |
+
self.up_pipeline_x4.enable_model_cpu_offload()
|
37 |
|
38 |
def __call__(
|
39 |
self,
|
asset3d_gen/models/text_model.py
CHANGED
@@ -75,7 +75,7 @@ def build_text2img_ip_pipeline(
|
|
75 |
pipe.set_ip_adapter_scale([ref_scale])
|
76 |
|
77 |
pipe = pipe.to(device)
|
78 |
-
|
79 |
# pipe.enable_xformers_memory_efficient_attention()
|
80 |
# pipe.enable_vae_slicing()
|
81 |
|
@@ -106,8 +106,8 @@ def build_text2img_pipeline(
|
|
106 |
force_zeros_for_empty_prompt=False,
|
107 |
)
|
108 |
pipe = pipe.to(device)
|
109 |
-
|
110 |
-
|
111 |
|
112 |
return pipe
|
113 |
|
|
|
75 |
pipe.set_ip_adapter_scale([ref_scale])
|
76 |
|
77 |
pipe = pipe.to(device)
|
78 |
+
pipe.enable_model_cpu_offload()
|
79 |
# pipe.enable_xformers_memory_efficient_attention()
|
80 |
# pipe.enable_vae_slicing()
|
81 |
|
|
|
106 |
force_zeros_for_empty_prompt=False,
|
107 |
)
|
108 |
pipe = pipe.to(device)
|
109 |
+
pipe.enable_model_cpu_offload()
|
110 |
+
pipe.enable_xformers_memory_efficient_attention()
|
111 |
|
112 |
return pipe
|
113 |
|
asset3d_gen/models/texture_model.py
CHANGED
@@ -86,6 +86,6 @@ def build_texture_gen_pipe(
|
|
86 |
pipe.set_ip_adapter_scale([ip_adapt_scale])
|
87 |
|
88 |
pipe = pipe.to(device)
|
89 |
-
|
90 |
|
91 |
return pipe
|
|
|
86 |
pipe.set_ip_adapter_scale([ip_adapt_scale])
|
87 |
|
88 |
pipe = pipe.to(device)
|
89 |
+
pipe.enable_model_cpu_offload()
|
90 |
|
91 |
return pipe
|
asset3d_gen/validators/urdf_convertor.py
CHANGED
@@ -165,7 +165,6 @@ class URDFGenerator(object):
|
|
165 |
"description": description.lower(),
|
166 |
"min_height": round(min_height, 4),
|
167 |
"max_height": round(max_height, 4),
|
168 |
-
"real_height": round((min_height + max_height) / 2, 4),
|
169 |
"min_mass": round(min_mass, 4),
|
170 |
"max_mass": round(max_mass, 4),
|
171 |
"mu1": round(mu1, 2),
|
@@ -376,11 +375,10 @@ class URDFGenerator(object):
|
|
376 |
response = self.gpt_client.query(text_prompt, image_path)
|
377 |
if response is None:
|
378 |
asset_attrs = {
|
379 |
-
"category":
|
380 |
-
"description":
|
381 |
"min_height": 1,
|
382 |
"max_height": 1,
|
383 |
-
"real_height": 1,
|
384 |
"min_mass": 1,
|
385 |
"max_mass": 1,
|
386 |
"mu1": 0.8,
|
@@ -394,6 +392,10 @@ class URDFGenerator(object):
|
|
394 |
if key in kwargs:
|
395 |
asset_attrs[key] = kwargs[key]
|
396 |
|
|
|
|
|
|
|
|
|
397 |
self.estimated_attrs = self.get_estimated_attributes(asset_attrs)
|
398 |
|
399 |
urdf_path = self.generate_urdf(mesh_path, output_root, asset_attrs)
|
|
|
165 |
"description": description.lower(),
|
166 |
"min_height": round(min_height, 4),
|
167 |
"max_height": round(max_height, 4),
|
|
|
168 |
"min_mass": round(min_mass, 4),
|
169 |
"max_mass": round(max_mass, 4),
|
170 |
"mu1": round(mu1, 2),
|
|
|
375 |
response = self.gpt_client.query(text_prompt, image_path)
|
376 |
if response is None:
|
377 |
asset_attrs = {
|
378 |
+
"category": category.lower(),
|
379 |
+
"description": category.lower(),
|
380 |
"min_height": 1,
|
381 |
"max_height": 1,
|
|
|
382 |
"min_mass": 1,
|
383 |
"max_mass": 1,
|
384 |
"mu1": 0.8,
|
|
|
392 |
if key in kwargs:
|
393 |
asset_attrs[key] = kwargs[key]
|
394 |
|
395 |
+
asset_attrs["real_height"] = round(
|
396 |
+
(asset_attrs["min_height"] + asset_attrs["max_height"]) / 2, 4
|
397 |
+
)
|
398 |
+
|
399 |
self.estimated_attrs = self.get_estimated_attributes(asset_attrs)
|
400 |
|
401 |
urdf_path = self.generate_urdf(mesh_path, output_root, asset_attrs)
|
common.py
CHANGED
@@ -637,7 +637,7 @@ def text2image_fn(
|
|
637 |
if postprocess:
|
638 |
for idx in range(len(images)):
|
639 |
image = images[idx]
|
640 |
-
images[idx] = preprocess_image_fn(image)
|
641 |
|
642 |
save_paths = []
|
643 |
for idx, image in enumerate(images):
|
|
|
637 |
if postprocess:
|
638 |
for idx in range(len(images)):
|
639 |
image = images[idx]
|
640 |
+
images[idx] = preprocess_image_fn(image, req)
|
641 |
|
642 |
save_paths = []
|
643 |
for idx, image in enumerate(images):
|
requirements.txt
CHANGED
@@ -41,7 +41,3 @@ https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.0.post2/flas
|
|
41 |
https://huggingface.co/spaces/xinjjj/ImgRoboAssetGen/resolve/main/wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl
|
42 |
https://huggingface.co/spaces/xinjjj/ImgRoboAssetGen/resolve/main/wheels/nvdiffrast-0.3.3-cp310-cp310-linux_x86_64.whl
|
43 |
https://huggingface.co/spaces/xinjjj/ImgRoboAssetGen/resolve/main/wheels/kaolin-0.16.0-cp310-cp310-linux_x86_64.whl
|
44 |
-
|
45 |
-
# https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl
|
46 |
-
# https://huggingface.co/spaces/JeffreyXiang/TRELLIS/resolve/main/wheels/nvdiffrast-0.3.3-cp310-cp310-linux_x86_64.whl
|
47 |
-
# kaolin@git+https://github.com/NVIDIAGameWorks/[email protected]
|
|
|
41 |
https://huggingface.co/spaces/xinjjj/ImgRoboAssetGen/resolve/main/wheels/diff_gaussian_rasterization-0.0.0-cp310-cp310-linux_x86_64.whl
|
42 |
https://huggingface.co/spaces/xinjjj/ImgRoboAssetGen/resolve/main/wheels/nvdiffrast-0.3.3-cp310-cp310-linux_x86_64.whl
|
43 |
https://huggingface.co/spaces/xinjjj/ImgRoboAssetGen/resolve/main/wheels/kaolin-0.16.0-cp310-cp310-linux_x86_64.whl
|
|
|
|
|
|
|
|