midas / app.py
import io
import os

import cv2
import numpy as np
import torch
import torchvision
import uvicorn
from fastapi import FastAPI, File, UploadFile
from PIL import Image

# Clone the FastDepth repository if it is not already available locally
if not os.path.exists("fastdepth"):
    os.system("git clone https://github.com/dwofk/fast-depth.git fastdepth")

from fastdepth import FastDepth  # import after cloning

app = FastAPI()

# Load the FastDepth model and pick a device
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = FastDepth(pretrained=True).to(device)
model.eval()
def analyzepath(image):
    # Helper: run depth inference on an already-preprocessed image tensor
    with torch.no_grad():
        depth_map = model(image).squeeze().cpu().numpy()
    return detect_path(depth_map)  # faster path processing
@app.post("/analyze_path/")
async def analyze_path(file: UploadFile = File(...)):
    image_bytes = await file.read()
    image = Image.open(io.BytesIO(image_bytes)).convert("L")

    # Preprocess and run FastDepth inference
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((224, 224)),
        torchvision.transforms.ToTensor(),
    ])
    img_tensor = transform(image).unsqueeze(0).to(device)
    with torch.no_grad():
        depth_map = model(img_tensor).squeeze().cpu().numpy()

    # Scale the predicted depth to 0-255 so the fixed threshold in detect_path applies
    depth_map = cv2.normalize(depth_map, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)

    # Flip the depth map (if needed)
    flipped_depth_map = cv2.flip(depth_map, -1)

    # Analyze the path
    command = detect_path(flipped_depth_map)
    return {"command": command}
def detect_path(depth_map):
    # Threshold the depth map to isolate near regions / obstacles
    _, thresh = cv2.threshold(depth_map, 200, 255, cv2.THRESH_BINARY)
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if len(contours) == 0:
        return "forward"
    # Compare average depth in the left and right thirds of the frame
    left_region = np.mean(depth_map[:, :depth_map.shape[1] // 3])
    right_region = np.mean(depth_map[:, 2 * depth_map.shape[1] // 3:])
    if left_region > right_region:
        return "left"
    else:
        return "right"