Commit 7a1529d
Parent(s): initial commit
Files changed:
- 1I_png_jpg.rf.2e20f1e5ebc107554cff15f1de083b1b.jpg +0 -0
- __pycache__/main.cpython-311.pyc +0 -0
- main.py +127 -0
- output_img.png +0 -0
- routes/__pycache__/detection.cpython-311.pyc +0 -0
- routes/__pycache__/fen_generator.cpython-311.pyc +0 -0
- routes/__pycache__/segmentation.cpython-311.pyc +0 -0
- routes/detection.py +29 -0
- routes/fen_generator.py +114 -0
- routes/segmentation.py +23 -0
1I_png_jpg.rf.2e20f1e5ebc107554cff15f1de083b1b.jpg
ADDED
__pycache__/main.cpython-311.pyc
ADDED
Binary file (6.69 kB).
main.py
ADDED
@@ -0,0 +1,127 @@
import io
from fastapi import FastAPI, File, UploadFile
from fastapi.responses import JSONResponse, StreamingResponse
from PIL import Image, UnidentifiedImageError
from routes.segmentation import segment_chess_board
from routes.detection import detect_pieces
from routes.fen_generator import gen_fen
from typing import List, Dict, Any
from pydantic import BaseModel

app = FastAPI()

class DetectionResults(BaseModel):
    boxes: list
    confidences: list
    classes: list

class FenRequest(BaseModel):
    detections: DetectionResults
    perspective: str

@app.get("/")
async def read_root():
    return {
        "name": "Narendra",
        "age": 20,
        "Gender": "Male"
    }

@app.post("/getSeg")
async def get_seg(file: UploadFile = File(...)):
    print(f'Image received: {file.filename}')

    try:
        image_content = await file.read()
        if not image_content:
            return JSONResponse(content={"error": "Empty file uploaded"}, status_code=400)

        try:
            image = Image.open(io.BytesIO(image_content))
        except UnidentifiedImageError:
            return JSONResponse(content={"error": "Invalid image format"}, status_code=400)

        # segment_chess_board is defined as async in routes/segmentation.py, so it must be awaited
        segmented_image = await segment_chess_board(image)

        if isinstance(segmented_image, dict):
            return JSONResponse(content=segmented_image, status_code=400)

        # Save to in-memory bytes
        img_bytes = io.BytesIO()
        segmented_image.save(img_bytes, format="PNG")
        img_bytes.seek(0)

        print("Image successfully processed and returned")
        return StreamingResponse(
            img_bytes,
            media_type="image/png",
            headers={"Content-Disposition": "inline; filename=output.png"}
        )

    except Exception as e:
        return JSONResponse(content={"error": str(e)}, status_code=500)


@app.post("/getCoords")
async def get_coords(file: UploadFile = File(...)):
    try:
        image_content = await file.read()

        if not image_content:
            print("No image found")
            return JSONResponse(content={"error": "Empty file uploaded"}, status_code=400)

        try:
            image = Image.open(io.BytesIO(image_content))
        except UnidentifiedImageError:
            return JSONResponse(content={"error": "Invalid image format"}, status_code=400)

        detection_results = await detect_pieces(image)

        if "error" in detection_results:
            return JSONResponse(content=detection_results, status_code=400)

        print("Image successfully processed and returned")
        return JSONResponse(content={"detections": detection_results}, status_code=200)

    except Exception as e:
        print(f"Unexpected error: {str(e)}")
        return JSONResponse(content={"error": "Unexpected error occurred", "details": str(e)}, status_code=500)


@app.post("/getFen")
async def get_fen(request: FenRequest):
    results = request.detections
    perspective = request.perspective
    try:
        if perspective not in ["w", "b"]:
            return JSONResponse(
                content={"error": "Perspective must be 'w' (white) or 'b' (black)"},
                status_code=400
            )

        if not results.boxes or not results.confidences or not results.classes:
            return JSONResponse(
                content={"error": "Invalid input", "details": "Missing required fields"},
                status_code=400
            )

        print(results.model_dump())

        fen = gen_fen(results.model_dump(), perspective)

        if not fen:
            return JSONResponse(
                content={"error": "FEN generation failed", "details": "Invalid input data"},
                status_code=500
            )

        return JSONResponse(content={"FEN": fen}, status_code=200)

    except Exception as e:
        return JSONResponse(
            content={"error": "Unexpected error occurred", "details": str(e)},
            status_code=500
        )
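As a quick end-to-end check, the endpoints above can be exercised from a small client script. This is a minimal sketch, not part of the commit: it assumes the app is running locally (e.g. via `uvicorn main:app` on the default port 8000), that the `requests` library is installed, and that a board photo named board.jpg is available; the host, port, and file name are illustrative only.

import requests

BASE = "http://localhost:8000"  # assumption: uvicorn's default host/port

# /getCoords expects a multipart upload under the field name "file"
with open("board.jpg", "rb") as f:  # assumption: any chess-board photo
    resp = requests.post(f"{BASE}/getCoords", files={"file": ("board.jpg", f, "image/jpeg")})
detections = resp.json()["detections"]  # {"boxes": [...], "confidences": [...], "classes": [...]}

# /getFen takes the detections plus a perspective ("w" or "b") as JSON
fen_resp = requests.post(f"{BASE}/getFen", json={"detections": detections, "perspective": "w"})
print(fen_resp.json())  # e.g. {"FEN": "..."}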
output_img.png
ADDED
routes/__pycache__/detection.cpython-311.pyc
ADDED
Binary file (1.5 kB).
routes/__pycache__/fen_generator.cpython-311.pyc
ADDED
Binary file (5.24 kB).
routes/__pycache__/segmentation.cpython-311.pyc
ADDED
Binary file (1.27 kB).
routes/detection.py
ADDED
@@ -0,0 +1,29 @@
from ultralytics import YOLO
from PIL import Image

detect_model = YOLO(r'D:\venv\chess-vision\models\chessDetection3d.pt')

async def detect_pieces(image: Image.Image):
    if image is None:
        print("No image is there")
        return {"error": "No image detected"}

    results = detect_model.predict(image)

    if not results or len(results) == 0:
        print("No results are there")
        return {"error": "No results found"}

    boxes = results[0].boxes.xyxy.tolist()
    confidences = results[0].boxes.conf.tolist()
    classes = results[0].boxes.cls.tolist()

    class_names = []

    for idx in classes:
        # class indices come back as floats from .tolist(), so cast to int before the name lookup
        class_names.append(detect_model.names[int(idx)])

    return {
        "boxes": boxes,
        "confidences": confidences,
        "classes": class_names
    }
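The dictionary returned by detect_pieces is what main.py wraps as {"detections": ...} and what its DetectionResults model expects. A minimal sketch of running the detector on its own, assuming it is executed from the repository root and that a local photo test_board.jpg exists (the file name is illustrative):

import asyncio
from PIL import Image
from routes.detection import detect_pieces

image = Image.open("test_board.jpg")  # assumption: any chess-board photo
result = asyncio.run(detect_pieces(image))  # detect_pieces is async, so run it in an event loop

# Expected shape (values illustrative):
# {"boxes": [[x_min, y_min, x_max, y_max], ...],
#  "confidences": [0.93, ...],
#  "classes": ["white-king", ...]}
print(result.get("classes"))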
routes/fen_generator.py
ADDED
@@ -0,0 +1,114 @@
import json

FEN_MAPPING = {
    "black-pawn": "p", "black-rook": "r", "black-knight": "n", "black-bishop": "b", "black-queen": "q", "black-king": "k",
    "white-pawn": "P", "white-rook": "R", "white-knight": "N", "white-bishop": "B", "white-queen": "Q", "white-king": "K"
}

# Note: there is a known issue around line 87 (the rank lookup); the possible modification is rank = int(grid_position[1]) - 1

# Grid settings
border = 0
grid_size = 224
block_size = grid_size // 8

x_labels = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']
y_labels = [8, 7, 6, 5, 4, 3, 2, 1]

def get_grid_coordinate(pixel_x, pixel_y, perspective):
    try:
        adjusted_x = pixel_x - border
        adjusted_y = pixel_y - border

        if adjusted_x < 0 or adjusted_y < 0 or adjusted_x >= grid_size or adjusted_y >= grid_size:
            return None

        x_index = adjusted_x // block_size
        y_index = adjusted_y // block_size

        if x_index < 0 or x_index >= len(x_labels) or y_index < 0 or y_index >= len(y_labels):
            return None

        if perspective == "b":
            x_index = 7 - x_index
            y_index = 7 - y_index

        file = x_labels[x_index]
        rank = y_labels[y_index]

        return f"{file}{rank}"
    except Exception as e:
        print(f"Error in get_grid_coordinate: {e}")
        return None

def gen_fen(result: dict, p: str):
    try:
        if not isinstance(result, dict):
            print("Error: Expected a dictionary for result")
            return None

        boxes = result.get("boxes", [])
        classes = result.get("classes", [])

        if not boxes or not classes:
            print("Error: Missing 'boxes' or 'classes' in input")
            return None

        if len(boxes) != len(classes):
            print("Error: Mismatch between bounding boxes and class labels")
            return None

        height, width = 224, 224
        board = [["8"] * 8 for _ in range(8)]

        for box, class_name in zip(boxes, classes):
            if not isinstance(box, (list, tuple)) or len(box) != 4:
                print(f"Skipping invalid box: {box}")
                continue

            fen_piece = FEN_MAPPING.get(class_name, None)
            if not fen_piece:
                print(f"Skipping unrecognized piece: {class_name}")
                continue

            try:
                x_min, y_min, x_max, y_max = map(int, box)
            except ValueError:
                print(f"Skipping box with invalid values: {box}")
                continue

            center_x, center_y = (x_min + x_max) / 2, (y_min + y_max) / 2
            pixel_x = int(center_x)
            pixel_y = int(height - center_y)

            grid_position = get_grid_coordinate(pixel_x, pixel_y, p)
            if grid_position:
                file = ord(grid_position[0]) - ord('a')
                rank = int(grid_position[1]) - 1

                if 0 <= rank < 8 and 0 <= file < 8:
                    board[rank][file] = fen_piece
                else:
                    print(f"Skipping out-of-bounds grid position: {grid_position}")

        fen_rows = []
        for row in board:
            fen_row = ""
            empty_count = 0
            for cell in row:
                if cell == "8":
                    empty_count += 1
                else:
                    if empty_count > 0:
                        fen_row += str(empty_count)
                        empty_count = 0
                    fen_row += cell
            if empty_count > 0:
                fen_row += str(empty_count)
            fen_rows.append(fen_row)  # FIXED: ensure the last row is added

        return "/".join(fen_rows)

    except Exception as e:
        print(f"Error in gen_fen: {e}")
        return None
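To make the grid-to-square mapping concrete, here is a small worked example with one synthetic detection; the numbers are fabricated purely for illustration. With grid_size = 224 and block_size = 28, a box centred at (112, 112) lands in column index 112 // 28 = 4, i.e. file 'e':

from routes.fen_generator import get_grid_coordinate, gen_fen

print(get_grid_coordinate(112, 112, "w"))  # -> "e4"

# One fabricated white-king detection on a 224x224 board crop
fake_result = {
    "boxes": [[96, 96, 128, 128]],   # centre at (112, 112)
    "confidences": [0.95],
    "classes": ["white-king"],
}
print(gen_fen(fake_result, "w"))  # -> "8/8/8/4K3/8/8/8/8" with these settings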
routes/segmentation.py
ADDED
@@ -0,0 +1,23 @@
from ultralytics import YOLO
from PIL import Image

seg_model = YOLO(r'D:\venv\chess-vision\models\SegModel (1).pt')

async def segment_chess_board(image: Image.Image):
    if image is None:
        return {"error": "No image found"}

    results = seg_model.predict(image)

    # predict() on a single image returns one Results object, so count detected boxes, not results
    if not results or len(results[0].boxes) == 0:
        return {"error": "No chessboard detected"}

    if len(results[0].boxes) > 1:
        return {"error": "Multiple chess boards found in the image"}

    # boxes.xyxy holds (x_min, y_min, x_max, y_max) for the detected board
    xyxy = results[0].boxes.xyxy[0].tolist()
    x_min, y_min, x_max, y_max = map(int, xyxy)

    segmented_image = image.crop((x_min, y_min, x_max, y_max))

    return segmented_image
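A minimal sketch of exercising the segmentation step on its own, assuming it is run from the repository root and that a photo full_scene.jpg containing a single chessboard exists (an illustrative name). The cropped board it returns is the same image that /getSeg streams back and that detect_pieces would then consume:

import asyncio
from PIL import Image
from routes.segmentation import segment_chess_board

scene = Image.open("full_scene.jpg")  # assumption: photo with one visible chessboard
board = asyncio.run(segment_chess_board(scene))

if isinstance(board, dict):  # the error-dict convention that main.py checks for
    print(board["error"])
else:
    board.save("board_crop.png")  # cropped board region, ready for detection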