tokenid committed · Commit 3ec2346
Parent(s): 3b4754e

optimize

Browse files:
- app.py (+12 -13)
- src/models/renderer/utils/renderer.py (+0 -2)
app.py
CHANGED
@@ -147,6 +147,7 @@ def preprocess(input_image, do_remove_background):
     return input_image
 
 
+@spaces.GPU
 def generate_mvs(input_image, sample_steps, sample_seed):
 
     seed_everything(sample_seed)
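For context on the decorator added above: on ZeroGPU Spaces, CUDA is attached only while a function decorated with @spaces.GPU is executing, so once generate_mvs is called directly from the UI rather than from inside make3d, it needs its own decorator. A minimal sketch of the pattern, assuming the `spaces` package available on Hugging Face Spaces (the `embed` function and its body are illustrative, not from this Space):

import spaces
import torch

@spaces.GPU
def embed(x):
    # CUDA is guaranteed to be available only inside the decorated call
    return torch.as_tensor(x, dtype=torch.float32).to("cuda").norm().item()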
@@ -166,22 +167,13 @@ def generate_mvs(input_image, sample_steps, sample_seed):
 
 
 @spaces.GPU
-def make3d(input_image, sample_steps, sample_seed):
-
-    cuda_path = find_cuda()
-
-    if cuda_path:
-        print(f"CUDA installation found at: {cuda_path}")
-    else:
-        print("CUDA installation not found")
+def make3d(images):
 
     global model
     if IS_FLEXICUBES:
         model.init_flexicubes_geometry(device, use_renderer=False)
     model = model.eval()
 
-    images, show_images = generate_mvs(input_image, sample_steps, sample_seed)
-
     images = np.asarray(images, dtype=np.float32) / 255.0
     images = torch.from_numpy(images).permute(2, 0, 1).contiguous().float()     # (3, 960, 640)
     images = rearrange(images, 'c (n h) (m w) -> (n m) c h w', n=3, m=2)        # (6, 3, 320, 320)
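The rearrange kept at the end of this hunk slices one 960x640 canvas holding a 3x2 grid of views into a batch of six 320x320 views. A quick standalone check of that einops pattern on dummy data:

import numpy as np
from einops import rearrange

grid = np.zeros((3, 960, 640), dtype=np.float32)                      # (c, n*h, m*w)
views = rearrange(grid, 'c (n h) (m w) -> (n m) c h w', n=3, m=2)
assert views.shape == (6, 3, 320, 320)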
@@ -246,7 +238,7 @@ def make3d(input_image, sample_steps, sample_seed):
 
     print(f"Mesh saved to {mesh_fpath}")
 
-    return mesh_fpath
+    return mesh_fpath
 
 
 _HEADER_ = '''
@@ -349,14 +341,21 @@ with gr.Blocks() as demo:
     gr.Markdown(_LINKS_)
     gr.Markdown(_CITE_)
 
+    mv_images = gr.State()
+
     submit.click(fn=check_input_image, inputs=[input_image]).success(
         fn=preprocess,
         inputs=[input_image, do_remove_background],
         outputs=[processed_image],
     ).success(
-        fn=make3d,
+        fn=generate_mvs,
         inputs=[processed_image, sample_steps, sample_seed],
-        outputs=[output_model_obj]
+        outputs=[mv_images, mv_show_images]
+
+    ).success(
+        fn=make3d,
+        inputs=[mv_images],
+        outputs=[output_model_obj]
     )
 
 demo.launch()
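The rewiring above stores generate_mvs's multiview output in a gr.State and hands it to make3d in a second .success() step, so the two GPU stages run as separate events instead of one long call. A self-contained sketch of the same chaining pattern (the step functions and widgets are illustrative, not from this app):

import gradio as gr

def step_a(text):
    mid = text.upper()
    return mid, f"preview: {mid}"        # (state value, display value)

def step_b(mid):
    return f"final({mid})"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="input")
    preview = gr.Textbox(label="preview")
    out = gr.Textbox(label="output")
    run = gr.Button("Run")

    mid_state = gr.State()               # carries step_a's result between events

    run.click(fn=step_a, inputs=[inp], outputs=[mid_state, preview]).success(
        fn=step_b,
        inputs=[mid_state],
        outputs=[out],
    )

demo.launch()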
src/models/renderer/utils/renderer.py
CHANGED
@@ -68,8 +68,6 @@ def sample_from_planes(plane_axes, plane_features, coordinates, mode='bilinear',
 
     coordinates = (2/box_warp) * coordinates # add specific box bounds
 
-    print('plane_axes', plane_axes.device, 'plane_features', plane_features.device, 'coordinates', coordinates.device)
-
     projected_coordinates = project_onto_planes(plane_axes, coordinates).unsqueeze(1)
     output_features = torch.nn.functional.grid_sample(
         plane_features,
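The deleted print ran on every call to sample_from_planes, which sits on the rendering hot path. If device placement ever needs inspecting again, a guarded debug log keeps the check without per-call console spam; a sketch (log_devices is a hypothetical helper, not part of the repo):

import logging

logger = logging.getLogger(__name__)

def log_devices(**tensors):
    # Formats tensor devices only when DEBUG is enabled for this module.
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(" ".join(f"{name}={t.device}" for name, t in tensors.items()))

# usage at the old print site:
# log_devices(plane_axes=plane_axes, plane_features=plane_features, coordinates=coordinates)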