# import gradio as gr
#
# def greet(name):
#     return "V5 Hello " + name + "!!"
#
# iface = gr.Interface(
#     fn=greet,
#     inputs="text",
#     outputs="text",
#     title="MB TEST 1",
# )
# iface.launch(share=True)
import gradio as gr
from models import make_inpainting
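# make_inpainting is assumed here to be a local helper (from models.py in this Space) that
# wraps an inpainting pipeline: it takes a positive/negative prompt, the source image as PNG
# bytes, and the mask as a numpy array, and returns the inpainted image.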
import io
from PIL import Image
import numpy as np
# from transformers import pipeline
#
# pipeline = pipeline(task="image-classification", model="julien-c/hotdog-not-hotdog")
def image_to_byte_array(image: Image.Image) -> bytes:
    # BytesIO is an in-memory buffer that behaves like a file
    imgByteArr = io.BytesIO()
    # image.save expects a file-like object, so pass the BytesIO instance instead
    imgByteArr_format = 'png'  # could also use image.format if the source format should be preserved
    image.save(imgByteArr, format=imgByteArr_format)
    # Turn the BytesIO object back into a bytes object
    return imgByteArr.getvalue()
def predict(input_img1, input_img2):
    # image = Image.open(requests.get("https://applydesignblobs-chh5aahjdzh0cnew.z01.azurefd.net/spaceimages/org_sqr_7fee0869-3187-4363-b5fb-5233e943649d.png", stream=True).raw)
    # mask = Image.open(requests.get("https://applydesign.blob.core.windows.net/spaceimages/mask_e85b1585-8.png", stream=True).raw)
    result_image = make_inpainting(positive_prompt='test1',
                                   image=image_to_byte_array(input_img1),
                                   mask_image=np.array(input_img2),
                                   negative_prompt="xxx",
                                   )
    # predictions = pipeline(input_img1)
    # make_inpainting is assumed to return something gr.Image can render (e.g. a PIL image)
    return result_image
gradio_app = gr.Interface(
    predict,
    inputs=[gr.Image(label="img", sources=['upload', 'webcam'], type="pil"),
            gr.Image(label="mask", sources=['upload', 'webcam'], type="pil")],
    outputs=gr.Image(label="resp"),
    title="rem fur 1",
)

gradio_app.launch(share=True)
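
# To run this locally (a minimal sketch, assuming the file is saved as app.py and the
# local models.py with make_inpainting is importable):
#   pip install gradio pillow numpy
#   python app.py
# launch(share=True) prints a local URL and, when sharing is available, a temporary
# public *.gradio.live link.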