Commit 45f24a2
Parent(s): 1f840b7
FEAT Fastapi

Files changed:
- app.py +16 -126
- gradio_app.py +126 -0
- utils/api.py +82 -0
- utils/gunicorn_config.py +3 -0
app.py
CHANGED
@@ -1,126 +1,16 @@
-import gradio as gr
-import cv2
-import numpy as np
-from PIL import Image
-import requests
-from io import BytesIO
-import torch
-import sys
-from pathlib import Path
-import os
-FILE = Path(__file__).resolve()
-ROOT = FILE.parents[0]  # YOLOv5 root directory
-if str(ROOT) not in sys.path:
-    sys.path.append(str(ROOT))  # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
-
-from models.common import DetectMultiBackend
-from utils.general import (check_img_size, non_max_suppression, scale_boxes)
-from utils.plots import Annotator, colors
-from utils.torch_utils import select_device
-
-# Load the YOLOv9 model
-device = select_device('')
-model = DetectMultiBackend('./weights/nsfw_detector_e_rok.pt', device=device, dnn=False, data=None, fp16=False)
-stride, names, pt = model.stride, model.names, model.pt
-imgsz = check_img_size((640, 640), s=stride)  # check image size
-
-def process_image(image, conf_threshold, iou_threshold, label_mode):
-    # Preprocess the image
-    im = torch.from_numpy(image).to(device).permute(2, 0, 1)  # HWC to CHW
-    im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
-    im /= 255  # 0 - 255 to 0.0 - 1.0
-    if len(im.shape) == 3:
-        im = im[None]  # expand for batch dim
-
-    # Resize the image
-    im = torch.nn.functional.interpolate(im, size=imgsz, mode='bilinear', align_corners=False)
-
-    # Inference
-    pred = model(im, augment=False, visualize=False)
-    if isinstance(pred, list):
-        pred = pred[0]  # take the first element (typical for single-image inference)
-
-    # NMS
-    pred = non_max_suppression(pred, conf_threshold, iou_threshold, None, False, max_det=1000)
-
-    # Process results
-    img = image.copy()
-
-    harmful_label_list = []
-    annotations = []
-
-    for i, det in enumerate(pred):  # per image
-        if len(det):
-            # Rescale boxes from img_size to im0 size
-            det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], img.shape).round()
-
-            # Write results
-            for *xyxy, conf, cls in reversed(det):
-                c = int(cls)  # integer class
-                if c != 6:
-                    harmful_label_list.append(c)
-
-                annotation = {
-                    'xyxy': xyxy,
-                    'conf': conf,
-                    'cls': c,
-                    'label': f"{names[c]} {conf:.2f}" if label_mode == "Draw Confidence" else f"{names[c]}"
-                }
-                annotations.append(annotation)
-
-    if harmful_label_list:
-        gr.Error("Warning, this is a harmful image.")
-        # Blur the entire image
-        img = cv2.GaussianBlur(img, (125, 125), 0)
-    else:
-        gr.Info('This is a safe image.')
-
-    # Apply the Annotator
-    annotator = Annotator(img, line_width=3, example=str(names))
-
-    for ann in annotations:
-        if label_mode == "Draw box":
-            annotator.box_label(ann['xyxy'], None, color=colors(ann['cls'], True))
-        elif label_mode in ["Draw Label", "Draw Confidence"]:
-            annotator.box_label(ann['xyxy'], ann['label'], color=colors(ann['cls'], True))
-        elif label_mode == "Censor Predictions":
-            cv2.rectangle(img, (int(ann['xyxy'][0]), int(ann['xyxy'][1])), (int(ann['xyxy'][2]), int(ann['xyxy'][3])), (0, 0, 0), -1)
-
-    return annotator.result()
-
-def detect_nsfw(input_image, conf_threshold, iou_threshold, label_mode):
-    if isinstance(input_image, str):  # URL input
-        response = requests.get(input_image)
-        image = Image.open(BytesIO(response.content))
-    else:  # File upload
-        image = Image.fromarray(input_image)
-
-    image = np.array(image)
-    if len(image.shape) == 2:  # grayscale
-        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
-    elif image.shape[2] == 4:  # RGBA
-        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
-
-    # Resize the image
-    image = cv2.resize(image, imgsz)
-
-    processed_image = process_image(image, conf_threshold, iou_threshold, label_mode)
-    return processed_image
-
-# Gradio interface setup
-demo = gr.Interface(
-    fn=detect_nsfw,
-    inputs=[
-        gr.Image(type="numpy", label="Upload an image or enter a URL"),
-        gr.Slider(0, 1, value=0.45, label="Confidence Threshold"),
-        gr.Slider(0, 1, value=0.45, label="Overlap Threshold"),
-        gr.Dropdown(["Draw box", "Draw Label", "Draw Confidence", "Censor Predictions"], label="Label Display Mode", value="Draw box")
-    ],
-    outputs=gr.Image(type="numpy", label="Processed Image"),
-    title="YOLOv9 NSFW Content Detection",
-    description="Upload an image or enter a URL to detect NSFW content using YOLOv9."
-)
-
-if __name__ == "__main__":
-    demo.launch(server_name="0.0.0.0")
+from fastapi import FastAPI, File, UploadFile, Form
+from utils.api import process_image_api
+
+app = FastAPI()
+
+@app.post("/process_image/")
+async def process_image_endpoint(
+    file: UploadFile = File(...),
+    conf_threshold: float = Form(0.25),
+    iou_threshold: float = Form(0.45),
+    label_mode: str = Form("Draw Confidence")
+):
+    return await process_image_api(file, conf_threshold, iou_threshold, label_mode)
+
+# app variable for Gunicorn
+application = app
gradio_app.py
ADDED
@@ -0,0 +1,126 @@
+import gradio as gr
+import cv2
+import numpy as np
+from PIL import Image
+import requests
+from io import BytesIO
+import torch
+import sys
+from pathlib import Path
+import os
+FILE = Path(__file__).resolve()
+ROOT = FILE.parents[0]  # YOLOv5 root directory
+if str(ROOT) not in sys.path:
+    sys.path.append(str(ROOT))  # add ROOT to PATH
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+
+from models.common import DetectMultiBackend
+from utils.general import (check_img_size, non_max_suppression, scale_boxes)
+from utils.plots import Annotator, colors
+from utils.torch_utils import select_device
+
+# Load the YOLOv9 model
+device = select_device('')
+model = DetectMultiBackend('./weights/nsfw_detector_e_rok.pt', device=device, dnn=False, data=None, fp16=False)
+stride, names, pt = model.stride, model.names, model.pt
+imgsz = check_img_size((640, 640), s=stride)  # check image size
+
+def process_image(image, conf_threshold, iou_threshold, label_mode):
+    # Preprocess the image
+    im = torch.from_numpy(image).to(device).permute(2, 0, 1)  # HWC to CHW
+    im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+    im /= 255  # 0 - 255 to 0.0 - 1.0
+    if len(im.shape) == 3:
+        im = im[None]  # expand for batch dim
+
+    # Resize the image
+    im = torch.nn.functional.interpolate(im, size=imgsz, mode='bilinear', align_corners=False)
+
+    # Inference
+    pred = model(im, augment=False, visualize=False)
+    if isinstance(pred, list):
+        pred = pred[0]  # take the first element (typical for single-image inference)
+
+    # NMS
+    pred = non_max_suppression(pred, conf_threshold, iou_threshold, None, False, max_det=1000)
+
+    # Process results
+    img = image.copy()
+
+    harmful_label_list = []
+    annotations = []
+
+    for i, det in enumerate(pred):  # per image
+        if len(det):
+            # Rescale boxes from img_size to im0 size
+            det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], img.shape).round()
+
+            # Write results
+            for *xyxy, conf, cls in reversed(det):
+                c = int(cls)  # integer class
+                if c != 6:
+                    harmful_label_list.append(c)
+
+                annotation = {
+                    'xyxy': xyxy,
+                    'conf': conf,
+                    'cls': c,
+                    'label': f"{names[c]} {conf:.2f}" if label_mode == "Draw Confidence" else f"{names[c]}"
+                }
+                annotations.append(annotation)
+
+    if harmful_label_list:
+        gr.Error("Warning, this is a harmful image.")
+        # Blur the entire image
+        img = cv2.GaussianBlur(img, (125, 125), 0)
+    else:
+        gr.Info('This is a safe image.')
+
+    # Apply the Annotator
+    annotator = Annotator(img, line_width=3, example=str(names))
+
+    for ann in annotations:
+        if label_mode == "Draw box":
+            annotator.box_label(ann['xyxy'], None, color=colors(ann['cls'], True))
+        elif label_mode in ["Draw Label", "Draw Confidence"]:
+            annotator.box_label(ann['xyxy'], ann['label'], color=colors(ann['cls'], True))
+        elif label_mode == "Censor Predictions":
+            cv2.rectangle(img, (int(ann['xyxy'][0]), int(ann['xyxy'][1])), (int(ann['xyxy'][2]), int(ann['xyxy'][3])), (0, 0, 0), -1)
+
+    return annotator.result()
+
+def detect_nsfw(input_image, conf_threshold, iou_threshold, label_mode):
+    if isinstance(input_image, str):  # URL input
+        response = requests.get(input_image)
+        image = Image.open(BytesIO(response.content))
+    else:  # File upload
+        image = Image.fromarray(input_image)
+
+    image = np.array(image)
+    if len(image.shape) == 2:  # grayscale
+        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
+    elif image.shape[2] == 4:  # RGBA
+        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
+
+    # Resize the image
+    image = cv2.resize(image, imgsz)
+
+    processed_image = process_image(image, conf_threshold, iou_threshold, label_mode)
+    return processed_image
+
+# Gradio interface setup
+demo = gr.Interface(
+    fn=detect_nsfw,
+    inputs=[
+        gr.Image(type="numpy", label="Upload an image or enter a URL"),
+        gr.Slider(0, 1, value=0.45, label="Confidence Threshold"),
+        gr.Slider(0, 1, value=0.45, label="Overlap Threshold"),
+        gr.Dropdown(["Draw box", "Draw Label", "Draw Confidence", "Censor Predictions"], label="Label Display Mode", value="Draw box")
+    ],
+    outputs=gr.Image(type="numpy", label="Processed Image"),
+    title="YOLOv9 NSFW Content Detection",
+    description="Upload an image or enter a URL to detect NSFW content using YOLOv9."
+)
+
+if __name__ == "__main__":
+    demo.launch(server_name="0.0.0.0")
utils/api.py
ADDED
@@ -0,0 +1,82 @@
+from fastapi import File, UploadFile, Form
+from fastapi.responses import JSONResponse
+from PIL import Image
+import io
+import numpy as np
+import torch
+
+from .general import non_max_suppression, scale_boxes
+from models.common import DetectMultiBackend
+from .torch_utils import select_device
+
+# Load the model
+device = select_device('')
+model = DetectMultiBackend('weights/nsfw_detector_e_rok.pt', device=device, dnn=False, data='data/coco128.yaml', fp16=False)
+names = model.names
+imgsz = (640, 640)
+
+async def process_image_api(
+    file: UploadFile = File(...),
+    conf_threshold: float = Form(0.25),
+    iou_threshold: float = Form(0.45),
+    label_mode: str = Form("Draw Confidence")
+):
+    contents = await file.read()
+    image = Image.open(io.BytesIO(contents))
+    image_np = np.array(image)
+
+    result = process_image(image_np, conf_threshold, iou_threshold, label_mode)
+
+    return JSONResponse(content={"result": result.result})
+
+def process_image(image, conf_threshold, iou_threshold, label_mode):
+    # Preprocess the image
+    im = torch.from_numpy(image).to(device).permute(2, 0, 1)  # HWC to CHW
+    im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
+    im /= 255  # 0 - 255 to 0.0 - 1.0
+    if len(im.shape) == 3:
+        im = im[None]  # expand for batch dim
+
+    # Resize the image
+    im = torch.nn.functional.interpolate(im, size=imgsz, mode='bilinear', align_corners=False)
+
+    # Inference
+    pred = model(im, augment=False, visualize=False)
+    if isinstance(pred, list):
+        pred = pred[0]  # take the first element (typical for single-image inference)
+
+    # NMS
+    pred = non_max_suppression(pred, conf_threshold, iou_threshold, None, False, max_det=1000)
+
+    # Process results
+    img = image.copy()
+
+    harmful_label_list = []
+    annotations = []
+
+    for i, det in enumerate(pred):  # per image
+        if len(det):
+            # Rescale boxes from img_size to im0 size
+            det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], img.shape).round()
+
+            # Write results
+            for *xyxy, conf, cls in reversed(det):
+                c = int(cls)  # integer class
+                if c != 6:
+                    harmful_label_list.append(c)
+
+                annotation = {
+                    'xyxy': xyxy,
+                    'conf': float(conf),
+                    'cls': c,
+                    'label': f"{names[c]} {conf:.2f}" if label_mode == "Draw Confidence" else f"{names[c]}"
+                }
+                annotations.append(annotation)
+
+    result = 'nsfw' if harmful_label_list else 'nomal'
+    return ProcessResponse(result=result)
+
+class ProcessResponse:
+    def __init__(self, result: str):
+        self.result = result
+        # self.annotations = annotations
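One caveat in the code above: unlike detect_nsfw in gradio_app.py, process_image_api passes np.array(Image.open(...)) straight into process_image, so a grayscale or RGBA upload would reach the HWC-to-CHW permute with the wrong channel count. A minimal guard, sketched here rather than taken from the commit, is to normalize the upload with PIL's convert before building the array, mirroring the grayscale/RGBA handling on the Gradio path:

# Sketch only: normalize the upload to 3-channel RGB before np.array(...),
# mirroring the cv2.cvtColor handling in gradio_app.py's detect_nsfw.
image = Image.open(io.BytesIO(contents)).convert("RGB")
image_np = np.array(image)  # guaranteed H x W x 3, safe for permute(2, 0, 1)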
utils/gunicorn_config.py
ADDED
@@ -0,0 +1,3 @@
+workers = 1
+worker_class = "uvicorn.workers.UvicornWorker"
+bind = '0.0.0.0:8001'
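This config, together with the application = app alias at the bottom of the new app.py, suggests the service is meant to be launched with something like gunicorn -c utils/gunicorn_config.py app:application (gunicorn's standard -c config / module:variable invocation; the exact command is an assumption, not part of the commit), which would serve the FastAPI app through a single UvicornWorker bound to 0.0.0.0:8001.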