import gradio as gr
import torch
from huggingface_hub import hf_hub_download

from depth_anything_v2.dpt import DepthAnythingV2


def dummy_infer(img):
    # Never touches the model: just inverts the uploaded image.
    return 255 - img


# --- LOAD THE MODEL, BUT DON'T USE IT ---
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

model_configs = {
    'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
}
encoder = 'vitl'

model = DepthAnythingV2(**model_configs[encoder])
model_path = hf_hub_download(
    repo_id="depth-anything/Depth-Anything-V2-Large",
    filename=f"depth_anything_v2_{encoder}.pth",
    repo_type="model",
)
state_dict = torch.load(model_path, map_location="cpu")
model.load_state_dict(state_dict)
model = model.to(DEVICE).eval()
# --- END MODEL LOADING ---

# The interface is wired to the dummy function, not to the model loaded above.
iface = gr.Interface(fn=dummy_infer, inputs=gr.Image(type="numpy"), outputs=gr.Image())
iface.launch()