import gradio as gr
import torch
from torchvision.transforms.functional import pil_to_tensor, to_pil_image

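# TorchScript export of the LaMa inpainting model, loaded from the repo's models/ directory.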
model = torch.jit.load("models/lama.pt")

def predict(input_img, input_mask):
    # Inputs arrive as PIL images (type="pil" in the gr.Image components below).
    # Image tensor should be (1, 3, 512, 512) with values in the range 0-1.
    # Mask tensor should be (1, 1, 512, 512) with values of exactly 0.0 or 1.0, nothing in between.
    image = (pil_to_tensor(input_img.convert('RGB')) / 255.0).unsqueeze(0)
    mask = (pil_to_tensor(input_mask.convert('L')) > 0).float().unsqueeze(0)
    out = model(image, mask)[0]
    return to_pil_image(out)

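# Gradio UI: two uploaded PIL images in (base image and mask), one inpainted image out.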
gradio_app = gr.Interface(
    predict,
    inputs=[
        gr.Image(label="Select Base Image", sources=['upload'], type="pil"),
        gr.Image(label="Select Image Mask (White will be inpainted)", sources=['upload'], type="pil"),
    ],
    outputs=[gr.Image(label="Inpainted Image")],
    title="LAMA Inpainting",
)

if __name__ == "__main__":
    gradio_app.launch()