Spaces:
Runtime error
Runtime error
File size: 1,329 Bytes
a87c3be 0462cba 172362f a87c3be 9378acd a87c3be cac5c68 172362f a87c3be cac5c68 463c121 a87c3be 172362f a87c3be 5e1d0b4 a87c3be 9378acd a87c3be 172362f a87c3be 463c121 cf15f5b a87c3be |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 |
import gradio as gr
from transformers import SegformerFeatureExtractor, SegformerForSemanticSegmentation
from PIL import Image
import torch
# SegFormer-B3 checkpoint fine-tuned on Cityscapes at 1024x1024 resolution.
CHECKPOINT = "nvidia/segformer-b3-finetuned-cityscapes-1024-1024"

# Load the segmentation model together with its matching feature extractor.
model = SegformerForSemanticSegmentation.from_pretrained(CHECKPOINT)
feature_extractor = SegformerFeatureExtractor.from_pretrained(CHECKPOINT)
# Function that runs semantic segmentation on one input image.
def predict(image):
    """Segment *image* with SegFormer and return the class map as a PIL image.

    Args:
        image: input ``PIL.Image``.

    Returns:
        A ``PIL.Image`` (mode "L") of per-pixel class indices, resized back
        to the original input size so it lines up with the input.
    """
    # Resize to the resolution the checkpoint was fine-tuned on.
    processed_image = image.resize((1024, 1024))
    inputs = feature_extractor(images=processed_image, return_tensors="pt")

    # Inference only — disable gradient tracking to save memory/time.
    with torch.no_grad():
        outputs = model(**inputs)
    logits = outputs.logits  # shape (1, num_classes, H', W'); H'/W' are downsampled

    # BUG FIX: argmax must run over the class dimension (dim=1). The original
    # torch.argmax(logits) flattened the tensor and returned a single scalar,
    # not a per-pixel segmentation map.
    seg = torch.argmax(logits, dim=1)
    seg = seg.squeeze().cpu().numpy().astype("uint8")

    # Convert to PIL (and back to the input's size) so the returned value
    # matches the interface's declared PIL image output. NEAREST keeps the
    # class labels discrete when resizing.
    return Image.fromarray(seg).resize(image.size, resample=Image.NEAREST)
# Build the Gradio interface: one image in, one segmentation image out.
demo = gr.Interface(
    fn=predict,
    # BUG FIX: gr.inputs.Image / gr.outputs.Image were deprecated in Gradio 3.x
    # and removed in 4.x (they raise at import time); use gr.Image directly.
    inputs=gr.Image(type="pil"),
    outputs=gr.Image(type="pil"),
    examples=["image1.jpg", "image2.jpg", "image3.jpg"],  # three example image paths
)

# Start the web app.
demo.launch()