import gradio as gr
from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation
from PIL import Image
import numpy as np
import torch

# Load the model and feature extractor
model_name = "nvidia/segformer-b0-finetuned-ade-512-512"
model = SegformerForSemanticSegmentation.from_pretrained(model_name)
feature_extractor = SegformerFeatureExtractor.from_pretrained(model_name)

def create_color_map(num_classes):
    """Create an arbitrary class-id-to-color mapping."""
    np.random.seed(42)  # fixed seed for reproducibility
    return {i: np.random.randint(0, 256, 3) for i in range(num_classes)}

def segment_image(image):
    # Preprocess the image
    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # Upsample the logits to the original image size and build the mask
    upsampled_logits = torch.nn.functional.interpolate(
        outputs.logits,
        size=image.size[::-1],  # PIL size is (width, height); interpolate expects (height, width)
        mode="bilinear",
        align_corners=False,
    )
    upsampled_predictions = upsampled_logits.argmax(dim=1)
    mask = upsampled_predictions.squeeze().numpy()

    # Map each class id to a color
    color_map = create_color_map(150)  # ADE20K has 150 classes
    colored_mask = np.array(
        [color_map[class_id] for class_id in mask.flatten()]
    ).reshape(mask.shape + (3,))

    # Return the colored mask as a PIL image
    return Image.fromarray(colored_mask.astype(np.uint8))

# Example image paths
example_images = ["image1.jpg", "image2.jpg", "image3.jpg"]

# Set up the Gradio interface
iface = gr.Interface(
    fn=segment_image,
    inputs=gr.Image(type="pil"),  # gr.inputs.Image was removed in newer Gradio releases
    outputs="image",
    title="Image Segmentation with SegFormer",
    description="Upload an image to segment it using the SegFormer model.",
    examples=example_images,
)

# Launch the interface
iface.launch()
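
# Optional sketch (not part of the original script): the per-pixel list comprehension in
# segment_image can be slow for large images. Assuming the same 150-class setup, one
# vectorized alternative builds a (num_classes, 3) palette array once and looks up all
# pixels at once with NumPy fancy indexing.
def colorize_mask(mask, num_classes=150):
    """Convert a 2D array of class ids into a colored PIL image via a palette lookup."""
    np.random.seed(42)  # same seed as create_color_map for comparable colors
    palette = np.random.randint(0, 256, (num_classes, 3), dtype=np.uint8)
    return Image.fromarray(palette[mask])  # fancy indexing yields an (H, W, 3) uint8 array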