# V1 / app.py
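# Gradio Space exposing three tools: furniture removal via inpainting
# (make_inpainting from the local `models` module), semantic segmentation
# with a Hugging Face image-segmentation pipeline, and a placeholder upscaler.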
import base64
import io

import gradio as gr
import numpy as np
from transformers import pipeline

import utils
from models import make_inpainting
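# The local modules `models` and `utils` are expected to provide
# make_inpainting, get_mask, and image_to_byte_array (all used below).
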
def removeFurniture(input_img1,
                    input_img2,
                    positive_prompt,
                    negative_prompt,
                    num_of_images,
                    resolution):
    """Inpaint the masked region of input_img1 and return up to 10 result images."""
    print("removeFurniture")

    # Gradio Number components return floats; sizes and counts must be integers.
    resolution = int(resolution)
    num_of_images = int(num_of_images)

    input_img1 = input_img1.resize((resolution, resolution))
    input_img2 = input_img2.resize((resolution, resolution))

    # Derive a binary inpainting mask from the drawn mask image.
    canvas_mask = np.array(input_img2)
    mask = utils.get_mask(canvas_mask)

    retList = make_inpainting(positive_prompt=positive_prompt,
                              image=input_img1,
                              mask_image=mask,
                              negative_prompt=negative_prompt,
                              num_of_images=num_of_images,
                              resolution=resolution)

    # Pad the result list to 10 entries so it matches the 10 Image outputs below.
    while len(retList) < 10:
        retList.append(None)
    return retList
def imageToString(img):
    """Serialize a PIL image to PNG-encoded bytes."""
    output = io.BytesIO()
    img.save(output, format="PNG")
    return output.getvalue()
def segmentation(img):
    """Run semantic segmentation and return the results, with masks base64-encoded, as a string."""
    print("segmentation")
    # Note: the pipeline is rebuilt on every call; caching it at module level
    # would avoid reloading the model each time.
    # semantic_segmentation = pipeline("image-segmentation", "nvidia/segformer-b1-finetuned-cityscapes-1024-1024")
    semantic_segmentation = pipeline("image-segmentation", "facebook/maskformer-swin-large-ade")
    results = semantic_segmentation(img)

    # Replace each PIL mask with a base64-encoded string so the result can be
    # returned as plain text.
    for p in results:
        p['mask'] = utils.image_to_byte_array(p['mask'])
        p['mask'] = base64.b64encode(p['mask']).decode("utf-8")

    return str(results)
def upscale(image):
    # Placeholder upscaler: currently returns the input image unchanged.
    return image
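# Hedged sketch (an assumption, not part of the original app and not wired into
# the UI below): a naive 2x upscale using Pillow's resize on the PIL image that
# Gradio passes in. The name naive_upscale is hypothetical.
def naive_upscale(image):
    # Double both dimensions with Pillow's default resampling filter.
    return image.resize((image.width * 2, image.height * 2))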
with gr.Blocks() as app:
    with gr.Row():
        with gr.Column():
            remove_btn = gr.Button("FurnituRemove")
            remove_inputs = [gr.Image(label="img", type="pil"),
                             gr.Image(label="mask", type="pil"),
                             gr.Textbox(label="positive_prompt", value="empty room"),
                             gr.Textbox(label="negative_prompt", value=""),
                             gr.Number(label="num_of_images", value=2),
                             gr.Number(label="resolution", value=512)]
            # Ten output slots, matching the padded list returned by removeFurniture.
            remove_outputs = [gr.Image() for _ in range(10)]
            remove_btn.click(removeFurniture, inputs=remove_inputs, outputs=remove_outputs)
        with gr.Column():
            seg_btn = gr.Button("Segmentation")
            seg_btn.click(segmentation, inputs=gr.Image(type="pil"), outputs=gr.TextArea())
        with gr.Column():
            up_btn = gr.Button("Upscale")
            up_btn.click(upscale, inputs=gr.Image(type="pil"), outputs=gr.Image())

app.launch(debug=True, share=True)