import torch
from diffusers import FluxInpaintPipeline
from diffusers.utils import load_image
from PIL import Image
import sys
import numpy as np
import json
import os
import spaces
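# FLUX.1 inpainting helper for a ZeroGPU Space: functions decorated with
# @spaces.GPU only hold a GPU for the duration of each call.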
device = "cuda"
pipeline_device = 0 if torch.cuda.is_available() else -1 # TODO mix above
torch_dtype = torch.float16
debug = True
@spaces.GPU
def make_inpaint_condition(image, image_mask):
    # Build a ControlNet-style conditioning tensor: masked pixels are set to -1.
    # (Currently unused; the call in process_image is commented out.)
    image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
    image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0
    if image.shape[0:2] != image_mask.shape[0:2]:
        print("error: image and image_mask must have the same size")
        return None
    image[image_mask > 0.5] = -1.0  # set as masked pixel
    image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)  # HWC -> NCHW
    image = torch.from_numpy(image)
    return image
@spaces.GPU
def process_image(image, mask_image, prompt="a girl", negative_prompt="",
                  model_id="black-forest-labs/FLUX.1-schnell", strength=0.75,
                  seed=0, num_inference_steps=4):
    #control_image = make_inpaint_condition(image, mask_image)
    #image.save("_control.jpg")
    if image is None:
        return None
    pipe = FluxInpaintPipeline.from_pretrained(model_id, torch_dtype=torch_dtype)
    pipe.to(device)
    generator = torch.Generator(device).manual_seed(seed)
    output = pipe(prompt=prompt, image=image, mask_image=mask_image,
                  strength=strength, num_inference_steps=num_inference_steps,
                  generator=generator)
    return output.images[0]
if __name__ == "__main__":
    image = Image.open(sys.argv[1])
    mask = Image.open(sys.argv[2])
    output = process_image(image, mask)
    output.save(sys.argv[3])
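# Example CLI usage (the file name "app.py" is an assumption; use the actual script name):
#   python app.py input.png mask.png output.png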