import base64
import io

import cv2
import numpy as np
import torch
import torchvision.transforms as T
import uvicorn
from fastapi import FastAPI, File, UploadFile
from PIL import Image

app = FastAPI()

# Load the MiDaS depth-estimation model from torch.hub (small, CPU-friendly variant)
midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small")
midas.eval()

# Preprocessing: resize to the 256x256 input the small model expects,
# convert to a tensor, and normalize with ImageNet statistics
transform = T.Compose([
    T.Resize((256, 256)),
    T.ToTensor(),
    T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])


@app.post("/upload/")
async def upload_image(file: UploadFile = File(...)):
    image_bytes = await file.read()
    image = Image.open(io.BytesIO(image_bytes)).convert("RGB")

    # Convert to a batched tensor and run the depth model
    img_tensor = transform(image).unsqueeze(0)
    with torch.no_grad():
        depth_map = midas(img_tensor).squeeze().cpu().numpy()

    # Normalize the depth map to 0-255 and apply a color map for visualization
    depth_map = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
    depth_img = cv2.applyColorMap(depth_map, cv2.COLORMAP_JET)

    # Encode as JPEG; raw bytes are not JSON-serializable, so return them base64-encoded
    _, buffer = cv2.imencode(".jpg", depth_img)
    return {"depth_map": base64.b64encode(buffer.tobytes()).decode("utf-8")}


if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)
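
# Example client (a minimal sketch, not part of the app above): it assumes the
# server is running locally on port 7860 and that a sample image "input.jpg"
# exists; both the URL and the filenames are illustrative assumptions. The field
# name "file" must match the endpoint's parameter, and the base64 string in the
# JSON response is decoded back into JPEG bytes before saving.
#
#   import base64, requests
#
#   with open("input.jpg", "rb") as f:
#       resp = requests.post(
#           "http://localhost:7860/upload/",
#           files={"file": ("input.jpg", f, "image/jpeg")},
#       )
#   with open("depth.jpg", "wb") as out:
#       out.write(base64.b64decode(resp.json()["depth_map"]))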