import gradio as gr
import numpy as np
from segment_anything import SamAutomaticMaskGenerator, sam_model_registry
import supervision as sv

from inference import DepthPredictor, SegmentPredictor
from utils import create_3d_obj, create_3d_pc, point_cloud


def snap(image, video):
    # Predict a depth map for the captured webcam frame.
    depth_predictor = DepthPredictor()
    depth_result = depth_predictor.predict(image)

    # Build a textured 3D mesh (glTF) from the RGB frame and its depth map.
    rgb_gltf_path = create_3d_obj(np.array(image), depth_result, path="./rgb.gltf")

    # Run SAM-based segmentation on the same frame.
    segment_predictor = SegmentPredictor()
    sam_result = segment_predictor.predict(image)

    # Combine the segmentation result with the depth map into a point-cloud plot.
    fig = point_cloud(np.array(sam_result), depth_result)

    # The webcam video input is accepted but not used by this pipeline.
    return [image, depth_result, sam_result, rgb_gltf_path, fig]


demo = gr.Interface(
    snap,
    inputs=[
        gr.Image(source="webcam", tool=None, label="Input Image", type="pil"),
        gr.Video(source="webcam"),
    ],
    outputs=[
        gr.Image(label="RGB"),
        gr.Image(label="predicted depth"),
        gr.Image(label="predicted segmentation"),
        gr.Model3D(
            label="3D mesh reconstruction - RGB",
            clear_color=[1.0, 1.0, 1.0, 1.0],
        ),
        gr.Plot(),
    ],
)

if __name__ == "__main__":
    demo.launch()
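
# Example of calling snap() directly, outside the Gradio UI. This is only a
# sketch: it assumes Pillow is installed, "sample.jpg" is a placeholder image
# path, and the model weights used by DepthPredictor / SegmentPredictor are
# available locally.
#
#     from PIL import Image
#     frame = Image.open("sample.jpg").convert("RGB")
#     rgb, depth, seg, mesh_path, fig = snap(frame, video=None)
#     print(mesh_path)  # path to the generated ./rgb.gltf mesh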