# -*- coding: utf-8 -*-
import sys
import io
import requests
import json
import base64
from PIL import Image
import numpy as np
import gradio as gr
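
# Gradio demo for SegGPT (https://github.com/baaivision/Painter): the user draws a
# prompt mask (or, in the SAM tab, a point/scribble) on one image, and the app asks a
# remote inference server to segment the same concept in one or two test images.
# Images are resized, base64-encoded, and sent as JSON; the server replies with a JSON
# list of base64-encoded result images.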
def inference_mask1_sam(prompt, img, img_):
    # SAM-assisted path: send the prompt image/mask plus the two test images,
    # all resized and base64-encoded, to the remote inference server.
    files = {
        "useSam": 1,
        "pimage": resizeImg(prompt["image"]),
        "pmask": resizeImg(prompt["mask"]),
        "img": resizeImg(img),
        "img_": resizeImg(img_)
    }
    r = requests.post("http://120.92.79.209/painter/run", json=files)
    a = json.loads(r.text)
    res = []
    for i in range(len(a)):
        # Each response element is a base64-encoded image; decode it into a uint8 array.
        #out = Image.open(io.BytesIO(base64.b64decode(a[i])))
        #out = out.resize((224, 224))
        #res.append(np.uint8(np.array(out)))
        res.append(np.uint8(np.array(Image.open(io.BytesIO(base64.b64decode(a[i]))))))
    return res[1:]  # remove prompt image

def inference_mask1(prompt, img, img_):
    files = {
        "pimage": resizeImg(prompt["image"]),
        "pmask": resizeImg(prompt["mask"]),
        "img": resizeImg(img),
        "img_": resizeImg(img_)
    }
    #r = requests.post("https://flagstudio.baai.ac.cn/painter/run", json=files)
    r = requests.post("http://120.92.79.209/painter/run", json=files)
    a = json.loads(r.text)
    res = []
    for i in range(len(a)):
        #out = Image.open(io.BytesIO(base64.b64decode(a[i])))
        #out = out.resize((224, 224))
        #res.append(np.uint8(np.array(out)))
        res.append(np.uint8(np.array(Image.open(io.BytesIO(base64.b64decode(a[i]))))))
    return res

def resizeImg(img):
    # Resize to 448x448 (presumably the model's expected input size) and return the
    # image as a base64-encoded WEBP string to keep the request payload small.
    res, hres = 448, 448
    img = Image.fromarray(img).convert("RGB")
    img = img.resize((res, hres))
    temp = io.BytesIO()
    img.save(temp, format="WEBP")
    return base64.b64encode(temp.getvalue()).decode('ascii')

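# Inferred request/response contract for the remote '/painter/run' endpoint, based on
# the two inference functions above (not an official API description):
#   request JSON : {"useSam": 1 (optional), "pimage": <b64 WEBP>, "pmask": <b64 WEBP>,
#                   "img": <b64 WEBP>, "img_": <b64 WEBP>}
#   response JSON: a list of base64-encoded images; the first element echoes the
#                  prompt, which inference_mask1_sam drops via res[1:].
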
def inference_mask_cat(
    prompt,
    img,
    img_,
):
    # Unused helper: simply returns the two test images unchanged.
    output_list = [img, img_]
    return output_list

# define app features and run
examples = [
    ['./images/hmbb_1.jpg', './images/hmbb_2.jpg', './images/hmbb_3.jpg'],
    ['./images/rainbow_1.jpg', './images/rainbow_2.jpg', './images/rainbow_3.jpg'],
    ['./images/earth_1.jpg', './images/earth_2.jpg', './images/earth_3.jpg'],
    ['./images/obj_1.jpg', './images/obj_2.jpg', './images/obj_3.jpg'],
    ['./images/ydt_2.jpg', './images/ydt_1.jpg', './images/ydt_3.jpg'],
]
examples_sam = [
    ['./images/nc_1.jpg', './images/nc_2.jpg', './images/nc_3.jpg'],
    ['./images/street_1.jpg', './images/street_2.jpg', './images/street_3.jpg'],
    ['./images/hmbb_1.jpg', './images/hmbb_2.jpg', './images/hmbb_3.jpg'],
    ['./images/earth_1.jpg', './images/earth_2.jpg', './images/earth_3.jpg'],
    ['./images/ydt_2.jpg', './images/ydt_1.jpg', './images/ydt_3.jpg'],
]
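
# Note: gr.ImageMask(brush_radius=...), .style(height=..., width=...) and
# launch(enable_queue=...) follow the Gradio 3.x API this demo was written against;
# later Gradio releases replaced or removed these calls.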
demo_mask = gr.Interface(fn=inference_mask1,
    inputs=[gr.ImageMask(brush_radius=8, label="prompt (提示图)"), gr.Image(label="img1 (测试图1)"), gr.Image(label="img2 (测试图2)")],
    #outputs=[gr.Image(shape=(448, 448), label="output1 (输出图1)"), gr.Image(shape=(448, 448), label="output2 (输出图2)")],
    outputs=[gr.Image(label="output1 (输出图1)").style(height=256, width=256), gr.Image(label="output2 (输出图2)").style(height=256, width=256)],
    #outputs=gr.Gallery(label="outputs (输出图)"),
    examples=examples,
    #title="SegGPT for Any Segmentation<br>(Painter Inside)",
    description="<p> \
        Choose an example below 🔥 🔥 🔥 <br>\
        Or, upload your own: <br>\
        1. Upload the images to be tested to 'img1' and/or 'img2'. <br>\
        2. Upload a prompt image to 'prompt' and draw a mask on it. <br>\
        <br> \
        💎 The more accurately you annotate, the more accurately the model predicts. <br>\
        💎 The examples below were never seen during training and are randomly selected for in-the-wild testing. <br>\
        💎 The current UI exposes only a small part of SegGPT's capabilities, i.e., the 1-shot case. \
        </p>",
    cache_examples=False,
    allow_flagging="never",
)
demo_mask_sam = gr.Interface(fn=inference_mask1_sam,
    inputs=[gr.ImageMask(brush_radius=4, label="prompt (提示图)"), gr.Image(label="img1 (测试图1)"), gr.Image(label="img2 (测试图2)")],
    #outputs=[gr.Image(shape=(448, 448), label="output1 (输出图1)"), gr.Image(shape=(448, 448), label="output2 (输出图2)")],
    # outputs=[gr.Image(label="output1 (输出图1)").style(height=256, width=256), gr.Image(label="output2 (输出图2)").style(height=256, width=256)],
    #outputs=gr.Gallery(label="outputs (输出图)"),
    outputs=[gr.Image(label="SAM output (mask)").style(height=256, width=256), gr.Image(label="output1 (输出图1)").style(height=256, width=256), gr.Image(label="output2 (输出图2)").style(height=256, width=256)],
    # outputs=[gr.Image(label="output3 (输出图1)").style(height=256, width=256), gr.Image(label="output4 (输出图2)").style(height=256, width=256)],
    examples=examples_sam,
    #title="SegGPT for Any Segmentation<br>(Painter Inside)",
    description="<p> \
        <strong>SAM+SegGPT: One touch for segmentation in all images or videos.</strong> <br>\
        Choose an example below 🔥 🔥 🔥 <br>\
        Or, upload your own: <br>\
        1. Upload the images to be tested to 'img1' and 'img2'. <br>\
        2. Upload a prompt image to 'prompt' and draw <strong>a point or line on the target</strong>. <br>\
        <br> \
        💎 SAM segments the target from any point or scribble, then SegGPT segments it in all other images. <br>\
        💎 The examples below were never seen during training and are randomly selected for in-the-wild testing. <br>\
        💎 The current UI exposes only a small part of SegGPT's capabilities, i.e., the 1-shot case. \
        </p>",
    cache_examples=False,
    allow_flagging="never",
)
title = "SegGPT: Segmenting Everything In Context<br> \
    <div align='center'> \
    <h2><a href='https://arxiv.org/abs/2304.03284' target='_blank' rel='noopener'>[paper]</a> \
    <a href='https://github.com/baaivision/Painter' target='_blank' rel='noopener'>[code]</a></h2> \
    <br> \
    <img src='file/rainbow2.gif' width='720px' /> \
    <h2>SegGPT performs arbitrary segmentation tasks in images or videos via in-context inference, such as object instance, stuff, part, contour, and text, with a single model.</h2> \
    </div> \
    "
demo = gr.TabbedInterface([demo_mask_sam, demo_mask], ['SAM+SegGPT (一触百通)', 'General 1-shot'], title=title)
#demo.launch(share=True, auth=("baai", "vision"))
demo.launch(enable_queue=False)
#demo.launch(server_name="0.0.0.0", server_port=34311)