Commit 371a984 · 1 Parent(s): fd2c88a
Committed by jens
Files changed (1)
  1. app.py +10 -20
app.py CHANGED
@@ -24,46 +24,36 @@ def produce_3d_reconstruction(image):
 def produce_point_cloud(depth_map, segmentation_map):
     return point_cloud(np.array(segmentation_map), depth_map)
 
-def snap(image, depth_map, segmentation_map, point_cloud, video):
-    if depth_map:
-        depth_result = produce_depth_map(image)
-    else:
-        depth_result = None
-
-    if segmentation_map:
-        sam_result = produce_segmentation_map(image)
-    else:
-        sam_result = None
-
-    if point_cloud:
-        point_cloud_fig = produce_point_cloud(depth_result, sam_result)
-    else:
-        point_cloud_fig = None
-
+def snap(image, depth_map, segmentation_map, video):
+    depth_result = produce_depth_map(image) if depth_map else None
+    sam_result = produce_segmentation_map(image) if segmentation_map else None
+    rgb_gltf_path = produce_3d_reconstruction(image) if depth_map else None
+    point_cloud_fig = produce_point_cloud(depth_result, sam_result) if segmentation_map else None
+
     if video:
         # Add video processing here if needed
         pass
 
-    return [image, depth_result, sam_result, point_cloud_fig]
+    return [image, depth_result, sam_result, rgb_gltf_path, point_cloud_fig]
 
 # Interface inputs
 image_input = gr.Image(source="webcam", tool=None, label="Input Image", type="pil")
 depth_map_button = gr.Button(label="Produce Depth Map", value=False)
 segmentation_map_button = gr.Button(label="Produce Segmentation Map", value=False)
-point_cloud_button = gr.Button(label="Produce Point Cloud", value=False)
 video_input = gr.Video(source="webcam")
 
 # Interface outputs
 output_image = gr.Image(label="RGB")
 output_depth_map = gr.Image(label="Predicted Depth")
 output_segmentation_map = gr.Image(label="Predicted Segmentation")
+output_3d_reconstruction = gr.Model3D(label="3D mesh reconstruction - RGB", clear_color=[1.0, 1.0, 1.0, 1.0])
 output_point_cloud = gr.Plot(label="Point Cloud")
 
 # Interface
 demo = gr.Interface(
     snap,
-    inputs=[image_input, depth_map_button, segmentation_map_button, point_cloud_button, video_input],
-    outputs=[output_image, output_depth_map, output_segmentation_map, None, output_point_cloud]
+    inputs=[image_input, depth_map_button, segmentation_map_button, video_input],
+    outputs=[output_image, output_depth_map, output_segmentation_map, output_3d_reconstruction, output_point_cloud]
 )
 
 if __name__ == "__main__":
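
Note: after this change, the five return values of snap line up positionally with the five output components passed to gr.Interface, and a 3D reconstruction is produced whenever a depth map is requested. The sketch below is a minimal, self-contained illustration of that wiring, not the Space's actual code: every produce_* helper is a hypothetical stub (the real ones run depth/segmentation models and write a glTF mesh to disk), "mesh.gltf" is a placeholder path, and gr.Checkbox stands in for the gr.Button flags used above.

# Minimal sketch of the new input/output wiring (gradio 3.x style).
# All produce_* helpers below are hypothetical stubs; the real app runs
# depth-estimation / segmentation models and writes a glTF mesh to disk.
import gradio as gr
import plotly.graph_objects as go  # gr.Plot accepts a plotly figure

def produce_depth_map(image):
    return image  # stub: the real helper returns a predicted depth image

def produce_segmentation_map(image):
    return image  # stub: the real helper returns a predicted segmentation image

def produce_3d_reconstruction(image):
    return "mesh.gltf"  # stub: the real helper writes a mesh file and returns its path

def produce_point_cloud(depth_map, segmentation_map):
    return go.Figure()  # stub: the real helper plots the back-projected points

def snap(image, depth_map, segmentation_map, video):
    depth_result = produce_depth_map(image) if depth_map else None
    sam_result = produce_segmentation_map(image) if segmentation_map else None
    rgb_gltf_path = produce_3d_reconstruction(image) if depth_map else None
    point_cloud_fig = produce_point_cloud(depth_result, sam_result) if segmentation_map else None
    # Five return values, matching the five output components below by position.
    return [image, depth_result, sam_result, rgb_gltf_path, point_cloud_fig]

demo = gr.Interface(
    snap,
    inputs=[
        gr.Image(source="webcam", label="Input Image", type="pil"),
        gr.Checkbox(label="Produce Depth Map"),
        gr.Checkbox(label="Produce Segmentation Map"),
        gr.Video(source="webcam"),
    ],
    outputs=[
        gr.Image(label="RGB"),
        gr.Image(label="Predicted Depth"),
        gr.Image(label="Predicted Segmentation"),
        gr.Model3D(label="3D mesh reconstruction - RGB"),
        gr.Plot(label="Point Cloud"),
    ],
)

if __name__ == "__main__":
    demo.launch()

Returning None for a skipped step leaves the corresponding output component empty in the rendered UI, which is what lets the conditional one-liners in snap feed gr.Image, gr.Model3D, and gr.Plot directly.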