# FastDepth navigation API — returns a steering command for an uploaded camera frame.
from fastapi import FastAPI, File, UploadFile
import io
import numpy as np
from PIL import Image
import uvicorn
import cv2
import os

# FastDepth lives in a cloned repository; make sure the clone exists BEFORE
# importing it — the original code imported first and crashed on a fresh
# environment.
# NOTE(review): shelling out to git at import time is a side effect; consider
# moving this into a setup/deploy script.
if not os.path.exists("fastdepth"):
    os.system("git clone https://github.com/dwofk/fast-depth.git fastdepth")

from fastdepth import FastDepth

# Load the pretrained FastDepth model once at startup and switch it to
# inference mode (the original created both `model` and `app` twice).
model = FastDepth(pretrained=True)
model.eval()

app = FastAPI()
def analyzepath(image):
    """Run FastDepth on *image* and translate the predicted depth map into a drive command.

    *image* is presumably a preprocessed tensor the model accepts — TODO confirm
    against callers.
    """
    prediction = model(image)
    depth = prediction.squeeze().cpu().numpy()
    return detect_path(depth)
@app.post("/analyze_path/")
async def analyze_path(file: UploadFile = File(...)):
    """Derive a navigation command from an uploaded image.

    The upload is decoded as an 8-bit grayscale image, flipped on both axes,
    and its pixel intensities are handed to ``detect_path`` as a depth map.

    Returns:
        dict: ``{"command": "left" | "right" | "forward"}``.
    """
    image_bytes = await file.read()
    image = Image.open(io.BytesIO(image_bytes)).convert("L")
    depth_map = np.array(image)

    # Flip both horizontally and vertically (cv2.flip flag -1) —
    # presumably the camera is mounted upside down; TODO confirm.
    flipped_depth_map = cv2.flip(depth_map, -1)

    # NOTE(review): the original handler also ran the FastDepth model here via
    # an undefined `torchvision` transform and `device` (guaranteed NameError)
    # and then DISCARDED the result, analyzing the raw pixels instead.  The
    # dead, crashing block was removed; if model-predicted depth is wanted,
    # build a tensor from `image`, run `model`, and pass that map below.
    command = detect_path(flipped_depth_map)
    return {"command": command}
def detect_path(depth_map):
    """Map a 2-D depth/intensity array to a steering command.

    Returns "forward" when no bright region (value > 200) is present;
    otherwise compares the mean intensity of the left and right thirds of the
    frame and steers toward the brighter side ("left" or "right").
    """
    # Binary mask of bright pixels, then its external contours.
    _, bright_mask = cv2.threshold(depth_map, 200, 255, cv2.THRESH_BINARY)
    blobs, _ = cv2.findContours(bright_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    if not blobs:
        # Nothing bright ahead: keep going straight.
        return "forward"

    width = depth_map.shape[1]
    left_mean = np.mean(depth_map[:, : width // 3])
    right_mean = np.mean(depth_map[:, 2 * width // 3 :])
    # The middle third is ignored — presumably intentional; TODO confirm.
    return "left" if left_mean > right_mean else "right"