#4:
from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
import numpy as np
import cv2
import base64
import logging
import os
from pathlib import Path
from face_recognition_system import FaceRecognitionSystem

# Set up logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# Initialize FastAPI app
app = FastAPI(
    title="Face Recognition API",
    description="API for face detection and recognition using InsightFace",
    version="1.0.0"
)

# Add CORS middleware for Hugging Face Spaces
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Create necessary directories
MODELS_DIR = Path("models")
KNOWN_FACES_DIR = Path("known_faces")
for directory in [MODELS_DIR, KNOWN_FACES_DIR]:
    directory.mkdir(parents=True, exist_ok=True)
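
# Assumption: known_faces/ holds the reference images used to build the gallery of
# known identities; the exact file layout and naming convention are determined by
# FaceRecognitionSystem.process_known_faces.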

# Initialize face recognition system
try:
    face_recog_system = FaceRecognitionSystem(
        model_name="buffalo_l",
        model_root=str(MODELS_DIR)
    )
    face_recog_system.process_known_faces(str(KNOWN_FACES_DIR))
    logger.info("Face recognition system initialized successfully")
except Exception as e:
    logger.error(f"Failed to initialize face recognition system: {e}")
    raise

@app.get("/")
async def root():
    """Health check endpoint"""
    model_files = list(MODELS_DIR.glob("*"))
    known_faces = list(KNOWN_FACES_DIR.glob("*"))
    return {
        "status": "ok",
        "message": "Face Recognition API is running",
        "model_directory": str(MODELS_DIR),
        "known_faces_directory": str(KNOWN_FACES_DIR),
        "model_files": [str(f.name) for f in model_files],
        "known_faces": [str(f.name) for f in known_faces]
    }

@app.post("/detect_faces")
async def detect_faces(file: UploadFile = File(...)):
    """
    Endpoint to detect and identify faces in an uploaded image
    """
    try:
        # Validate file type
        if not file.content_type.startswith('image/'):
            raise HTTPException(status_code=400, detail="File must be an image")

        # Read and decode image
        image_data = await file.read()
        nparr = np.frombuffer(image_data, np.uint8)
        img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if img is None:
            raise HTTPException(status_code=400, detail="Failed to decode image")

        # Process image
        detected_img = face_recog_system.detect_and_identify(img)

        # Encode processed image to base64
        success, buffer = cv2.imencode('.jpg', detected_img)
        if not success:
            raise HTTPException(status_code=500, detail="Failed to encode processed image")
        processed_image_base64 = base64.b64encode(buffer).decode("utf-8")

        # Prepare response: convert the known-face embeddings (numpy arrays) to lists for JSON serialization
        serializable_embeddings = {
            name: embedding.tolist() if isinstance(embedding, np.ndarray) else embedding
            for name, embedding in face_recog_system.known_face_embeddings.items()
        }
        return JSONResponse(content={
            "status": "success",
            "processed_image": processed_image_base64,
            "faces": serializable_embeddings
        })
    except HTTPException as he:
        raise he
    except Exception as e:
        logger.error(f"Error processing image: {e}")
        raise HTTPException(status_code=500, detail=str(e))
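
# Illustrative client sketch (not part of the API): shows one way to call the
# /detect_faces endpoint and decode the returned base64 image. Assumes the server
# is running locally on port 7860 and that the `requests` package is installed.
#
# import base64
# import requests
#
# with open("photo.jpg", "rb") as f:
#     resp = requests.post(
#         "http://localhost:7860/detect_faces",
#         files={"file": ("photo.jpg", f, "image/jpeg")},
#     )
# resp.raise_for_status()
# payload = resp.json()
# # payload["faces"] maps each known-face name to its embedding (list of floats)
# with open("annotated.jpg", "wb") as out:
#     out.write(base64.b64decode(payload["processed_image"]))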

# Configuration for Hugging Face Spaces
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)

#3:
# from fastapi import FastAPI, File, UploadFile, HTTPException
# from fastapi.responses import JSONResponse
# from fastapi.middleware.cors import CORSMiddleware
# import numpy as np
# import cv2
# import base64
# import logging
# from face_recognition_system import FaceRecognitionSystem
#
# # Set up logging
# logging.basicConfig(
#     level=logging.INFO,
#     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
# )
# logger = logging.getLogger(__name__)
#
# # Initialize FastAPI app
# app = FastAPI(
#     title="Face Recognition API",
#     description="API for face detection and recognition using InsightFace",
#     version="1.0.0"
# )
#
# # Add CORS middleware for Hugging Face Spaces
# app.add_middleware(
#     CORSMiddleware,
#     allow_origins=["*"],
#     allow_credentials=True,
#     allow_methods=["*"],
#     allow_headers=["*"],
# )
#
# # Initialize face recognition system
# try:
#     face_recog_system = FaceRecognitionSystem()
#     # Update the path to match your Hugging Face Spaces directory structure
#     face_recog_system.process_known_faces("known_faces")
#     logger.info("Face recognition system initialized successfully")
# except Exception as e:
#     logger.error(f"Failed to initialize face recognition system: {e}")
#     raise
#
# @app.get("/")
# async def root():
#     """Health check endpoint"""
#     return {"status": "ok", "message": "Face Recognition API is running"}
#
# @app.post("/detect_faces")
# async def detect_faces(file: UploadFile = File(...)):
#     """
#     Endpoint to detect and identify faces in an uploaded image
#     """
#     try:
#         # Validate file type
#         if not file.content_type.startswith('image/'):
#             raise HTTPException(status_code=400, detail="File must be an image")
#
#         # Read and decode image
#         image_data = await file.read()
#         nparr = np.frombuffer(image_data, np.uint8)
#         img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
#         if img is None:
#             raise HTTPException(status_code=400, detail="Failed to decode image")
#
#         # Process image
#         detected_img = face_recog_system.detect_and_identify(img)
#
#         # Encode processed image to base64
#         success, buffer = cv2.imencode('.jpg', detected_img)
#         if not success:
#             raise HTTPException(status_code=500, detail="Failed to encode processed image")
#         processed_image_base64 = base64.b64encode(buffer).decode("utf-8")
#
#         # Prepare response
#         serializable_embeddings = {
#             name: embedding.tolist() if isinstance(embedding, np.ndarray) else embedding
#             for name, embedding in face_recog_system.known_face_embeddings.items()
#         }
#         return JSONResponse(content={
#             "status": "success",
#             "processed_image": processed_image_base64,
#             "faces": serializable_embeddings
#         })
#     except HTTPException as he:
#         raise he
#     except Exception as e:
#         logger.error(f"Error processing image: {e}")
#         raise HTTPException(status_code=500, detail=str(e))
#
# # Configuration for Hugging Face Spaces
# if __name__ == "__main__":
#     import uvicorn
#     uvicorn.run(app, host="0.0.0.0", port=7860)

# initial:
# from fastapi import FastAPI
#
# app = FastAPI()
#
# @app.get("/")
# def home():
#     '''Health check endpoint'''
#     return {"Everything's": "OK ✅"}

# final:
# #2
# from fastapi import FastAPI, File, UploadFile
# from fastapi.responses import JSONResponse
# import numpy as np
# import cv2
# import base64
# import logging
# from face_recognition_system import FaceRecognitionSystem  # import your class
#
# # Set up logging
# logging.basicConfig(level=logging.INFO)
#
# app = FastAPI()
# face_recog_system = FaceRecognitionSystem()
#
# # Load known faces
# try:
#     face_recog_system.process_known_faces("./data/known/custom/")
#     logging.info("Loaded known faces successfully.")
# except Exception as e:
#     logging.error(f"Error loading known faces: {e}")
#
# @app.post("/detect_faces")
# async def detect_faces(file: UploadFile = File(...)):
#     try:
#         # Read and decode image from the uploaded file
#         image_data = await file.read()
#         nparr = np.frombuffer(image_data, np.uint8)
#         img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
#
#         # Check if image is loaded
#         if img is None:
#             logging.error("Failed to decode image. Ensure the uploaded file is a valid image.")
#             return JSONResponse(content={"error": "Invalid image file"}, status_code=400)
#
#         # Run detection and identification
#         detected_img = face_recog_system.detect_and_identify(img)
#
#         # Encode image to base64
#         success, buffer = cv2.imencode('.jpg', detected_img)
#         if not success:
#             logging.error("Image encoding failed.")
#             return JSONResponse(content={"error": "Image encoding failed"}, status_code=500)
#         processed_image_base64 = base64.b64encode(buffer).decode("utf-8")
#
#         # Optional: check if face embeddings were created
#         if not face_recog_system.known_face_embeddings:
#             logging.warning("No faces detected.")
#
#         # NOTE: Convert numpy arrays to lists for JSON serialization
#         serializable_embeddings = {
#             name: embedding.tolist() if isinstance(embedding, np.ndarray) else embedding
#             for name, embedding in face_recog_system.known_face_embeddings.items()
#         }
#         return JSONResponse(content={
#             "processed_image": processed_image_base64,
#             "faces": serializable_embeddings
#         })
#         # return JSONResponse(content={"processed_image": processed_image_base64, "faces": face_recog_system.known_face_embeddings})
#     except Exception as e:
#         logging.error(f"Error processing image: {e}")
#         return JSONResponse(content={"error": "An error occurred while processing the image"}, status_code=500)
#
# # main:
# # NOTE: when serving on a LAN address, first check the machine's IPv4 address (e.g. via `ipconfig` on Windows)
# # import uvicorn
# # if __name__ == "__main__":
# #     uvicorn.run(app='app:app',
# #                 host='192.168.1.17', port=7860, reload=True)