LearningnRunning committed
Commit 3a53eae · 1 Parent(s): 9e31b92

FIX path error

Files changed (4)
  1. app.py +29 -108
  2. config/settings.py +2 -1
  3. models/common.py +17 -1
  4. utils/data_processing.py +2 -2
app.py CHANGED
@@ -1,126 +1,47 @@
 import gradio as gr
-import cv2
-import numpy as np
-from PIL import Image
-import requests
-from io import BytesIO
-import torch
 import sys
 from pathlib import Path
 import os
+from utils.data_processing import detect_nsfw
+# Import YOLO-related modules
 FILE = Path(__file__).resolve()
-ROOT = FILE.parents[0]  # YOLOv5 root directory
+ROOT = FILE.parents[0]
 if str(ROOT) not in sys.path:
-    sys.path.append(str(ROOT))  # add ROOT to PATH
-ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative
+    sys.path.append(str(ROOT))
+ROOT = Path(os.path.relpath(ROOT, Path.cwd()))
 
-from models.common import DetectMultiBackend
-from utils.general import (check_img_size, non_max_suppression, scale_boxes)
-from utils.plots import Annotator, colors
-from utils.torch_utils import select_device
 
-# Load the YOLOv9 model
-device = select_device('')
-model = DetectMultiBackend('./weights/nsfw_detector_e_rok.pt', device=device, dnn=False, data=None, fp16=False)
-stride, names, pt = model.stride, model.names, model.pt
-imgsz = check_img_size((640, 640), s=stride)  # check image size
 
-def process_image(image, conf_threshold, iou_threshold, label_mode):
-    # Preprocess the image
-    im = torch.from_numpy(image).to(device).permute(2, 0, 1)  # HWC to CHW
-    im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
-    im /= 255  # 0 - 255 to 0.0 - 1.0
-    if len(im.shape) == 3:
-        im = im[None]  # expand for batch dim
-
-    # Resize the image
-    im = torch.nn.functional.interpolate(im, size=imgsz, mode='bilinear', align_corners=False)
-
-    # Inference
-    pred = model(im, augment=False, visualize=False)
-    if isinstance(pred, list):
-        pred = pred[0]  # take the first element (the usual case for single-image inference)
-
-    # NMS
-    pred = non_max_suppression(pred, conf_threshold, iou_threshold, None, False, max_det=1000)
 
-    # Process the results
-    img = image.copy()
-
-    harmful_label_list = []
-    annotations = []
-
-    for i, det in enumerate(pred):  # per image
-        if len(det):
-            # Rescale boxes from img_size to im0 size
-            det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], img.shape).round()
-
-            # Write results
-            for *xyxy, conf, cls in reversed(det):
-                c = int(cls)  # integer class
-                if c != 6:
-                    harmful_label_list.append(c)
-
-                annotation = {
-                    'xyxy': xyxy,
-                    'conf': conf,
-                    'cls': c,
-                    'label': f"{names[c]} {conf:.2f}" if label_mode == "Draw Confidence" else f"{names[c]}"
-                }
-                annotations.append(annotation)
-
-    if harmful_label_list:
-        gr.Error("Warning, this is a harmful image.")
-        # Blur the whole image
-        img = cv2.GaussianBlur(img, (125, 125), 0)
-    else:
-        gr.Info('This is a safe image.')
-
-    # Apply the Annotator
-    annotator = Annotator(img, line_width=3, example=str(names))
-
-    for ann in annotations:
-        if label_mode == "Draw box":
-            annotator.box_label(ann['xyxy'], None, color=colors(ann['cls'], True))
-        elif label_mode in ["Draw Label", "Draw Confidence"]:
-            annotator.box_label(ann['xyxy'], ann['label'], color=colors(ann['cls'], True))
-        elif label_mode == "Censor Predictions":
-            cv2.rectangle(img, (int(ann['xyxy'][0]), int(ann['xyxy'][1])), (int(ann['xyxy'][2]), int(ann['xyxy'][3])), (0, 0, 0), -1)
 
-    return annotator.result()
+# Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# NSFW Content Detection")
+    with gr.Row():
+        detection_mode = gr.Radio(["Simple Check", "Detailed Analysis"], label="Detection Mode", value="Simple Check")
+    with gr.Row():
+        conf_threshold = gr.Slider(0, 1, value=0.3, label="Confidence Threshold", visible=False)
+        iou_threshold = gr.Slider(0, 1, value=0.45, label="Overlap Threshold", visible=False)
+        label_mode = gr.Dropdown(["Draw box", "Draw Label", "Draw Confidence", "Censor Predictions"], label="Label Display Mode", value="Draw box", visible=False)
+
+    with gr.Row():
+        input_image = gr.Image(type="numpy", label="Upload an image or enter a URL")
+        output_text = gr.Textbox(label="Detection Result")
+    with gr.Row():
+        output_image = gr.Image(type="numpy", label="Processed Image (for detailed analysis)", visible=False)
 
-def detect_nsfw(input_image, conf_threshold, iou_threshold, label_mode):
-    if isinstance(input_image, str):  # URL input
-        response = requests.get(input_image)
-        image = Image.open(BytesIO(response.content))
-    else:  # File upload
-        image = Image.fromarray(input_image)
+    detect_button = gr.Button("Detect")
 
-    image = np.array(image)
-    if len(image.shape) == 2:  # grayscale
-        image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB)
-    elif image.shape[2] == 4:  # RGBA
-        image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
+    def update_visibility(mode):
+        return [gr.update(visible=(mode == "Detailed Analysis"))] * 4
 
-    # Resize the image
-    image = cv2.resize(image, imgsz)
+    detection_mode.change(update_visibility, inputs=[detection_mode], outputs=[conf_threshold, iou_threshold, label_mode, output_image])
 
-    processed_image = process_image(image, conf_threshold, iou_threshold, label_mode)
-    return processed_image
-
-# Set up the Gradio interface
-demo = gr.Interface(
-    fn=detect_nsfw,
-    inputs=[
-        gr.Image(type="numpy", label="Upload an image or enter a URL"),
-        gr.Slider(0, 1, value=0.3, label="Confidence Threshold"),
-        gr.Slider(0, 1, value=0.45, label="Overlap Threshold"),
-        gr.Dropdown(["Draw box", "Draw Label", "Draw Confidence", "Censor Predictions"], label="Label Display Mode", value="Draw box")
-    ],
-    outputs=gr.Image(type="numpy", label="Processed Image"),
-    title="YOLOv9 NSFW Content Detection",
-    description="Upload an image or enter a URL to detect NSFW content using YOLOv9."
-)
+    detect_button.click(
+        detect_nsfw,
+        inputs=[input_image, detection_mode, conf_threshold, iou_threshold, label_mode],
+        outputs=[output_text, output_image]
+    )
 
 if __name__ == "__main__":
     demo.launch(server_name="0.0.0.0")
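
Note: the click handler above gives detect_nsfw a new contract of five inputs (image, detection mode, confidence, IoU, label mode) and two outputs (result text plus an optional annotated image), but the updated function body in utils/data_processing.py is not shown in this diff. A minimal hypothetical sketch of that dispatch, assuming the module keeps the pre-commit process_image helper and the nsfw_model global it loads; the return strings are invented placeholders:

from PIL import Image

def detect_nsfw(input_image, detection_mode, conf_threshold, iou_threshold, label_mode):
    # Hypothetical sketch; the real body lives in utils/data_processing.py.
    if detection_mode == "Simple Check":
        # Classifier-only path: run the ViT classifier, return no image.
        label = nsfw_model.predict(Image.fromarray(input_image))
        return f"Classification result: {label}", None
    # Detailed path: YOLO detection plus annotation, as in the old process_image above.
    annotated = process_image(input_image, conf_threshold, iou_threshold, label_mode)
    return "Detailed analysis complete", annotated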
config/settings.py CHANGED
@@ -3,4 +3,5 @@ import os
 BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 
 # Set MODEL_PATH to an absolute path
-MODEL_PATH = os.path.join(BASE_DIR, 'weights', 'yolov9_c_nsfw.pt')
+DETECT_MODEL_PATH = os.path.join(BASE_DIR, 'weights', 'yolov9_c_nsfw.pt')
+CLASSIFICATION_MODEL_PATH = "Falconsai/nsfw_image_detection"
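
Since the commit message is a path fix, it is worth tracing how the detector constant now resolves. A worked example, assuming a hypothetical checkout at /repo:

# config/settings.py sits at /repo/config/settings.py, so:
# os.path.abspath(__file__)              -> /repo/config/settings.py
# os.path.dirname(...)                   -> /repo/config
# os.path.dirname(os.path.dirname(...))  -> /repo  (BASE_DIR, the repo root)
# DETECT_MODEL_PATH                      -> /repo/weights/yolov9_c_nsfw.pt
# CLASSIFICATION_MODEL_PATH is not a filesystem path at all: it is a Hugging Face
# Hub model id, consumed by from_pretrained() in models/common.py below.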
models/common.py CHANGED
@@ -21,6 +21,7 @@ import torch.nn as nn
 from IPython.display import display
 from PIL import Image
 from torch.cuda import amp
+from transformers import AutoModelForImageClassification, ViTImageProcessor
 
 from utils import TryExcept
 from utils.dataloaders import exif_transpose, letterbox
@@ -29,7 +30,7 @@ from utils.general import (LOGGER, ROOT, Profile, check_requirements, check_suff
     xywh2xyxy, xyxy2xywh, yaml_load)
 from utils.plots import Annotator, colors, save_one_box
 from utils.torch_utils import copy_attr, smart_inference_mode
-
+from config.settings import CLASSIFICATION_MODEL_PATH
 
 def autopad(k, p=None, d=1):  # kernel, padding, dilation
     # Pad to 'same' shape outputs
@@ -1198,3 +1199,18 @@ class Classify(nn.Module):
         if isinstance(x, list):
             x = torch.cat(x, 1)
         return self.linear(self.drop(self.pool(self.conv(x)).flatten(1)))
+
+class NSFWModel:
+    def __init__(self):
+        self.model = AutoModelForImageClassification.from_pretrained(CLASSIFICATION_MODEL_PATH)
+        self.processor = ViTImageProcessor.from_pretrained(CLASSIFICATION_MODEL_PATH)
+        self.id2label = self.model.config.id2label
+
+    def predict(self, image: Image.Image) -> str:
+        with torch.no_grad():
+            inputs = self.processor(images=image, return_tensors="pt")
+            outputs = self.model(**inputs)
+            logits = outputs.logits
+            predicted_label = logits.argmax(-1).item()
+
+        return self.id2label[predicted_label]
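
The new NSFWModel wraps the Falconsai classifier end to end, so it can be exercised in isolation. A minimal usage sketch; instantiating it downloads the Hub weights on first use, and sample.jpg is a placeholder path:

from PIL import Image
from models.common import NSFWModel

clf = NSFWModel()  # loads the ViT processor and weights once
img = Image.open("sample.jpg").convert("RGB")
print(clf.predict(img))  # a key from model.config.id2label, e.g. "normal" or "nsfw"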
utils/data_processing.py CHANGED
@@ -10,14 +10,14 @@ from models.common import DetectMultiBackend, NSFWModel
 from utils.torch_utils import select_device
 from utils.general import (check_img_size, non_max_suppression, scale_boxes)
 from utils.plots import Annotator, colors
-
+from config.settings import DETECT_MODEL_PATH
 
 # Load classification model
 nsfw_model = NSFWModel()
 
 # Load YOLO model
 device = select_device('')
-yolo_model = DetectMultiBackend('./weights/nsfw_detector_e_rok.pt', device=device, dnn=False, data=None, fp16=False)
+yolo_model = DetectMultiBackend(DETECT_MODEL_PATH, device=device, dnn=False, data=None, fp16=False)
 stride, names, pt = yolo_model.stride, yolo_model.names, yolo_model.pt
 imgsz = check_img_size((640, 640), s=stride)
 
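
Both models are built at module import time, which is why app.py only needs "from utils.data_processing import detect_nsfw". A quick smoke test of the module-level state, assuming the YOLO weights exist locally and test.jpg is a placeholder image:

from PIL import Image
import utils.data_processing as dp

print(dp.imgsz)   # stride-validated input size, e.g. (640, 640)
print(dp.names)   # class names from the loaded YOLO detector
img = Image.open("test.jpg").convert("RGB")
print(dp.nsfw_model.predict(img))  # classifier label for the same image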