# import torch
# import spaces
# import gradio as gr
# from src.util.base import *
# from src.util.params import *
# from diffusers import AutoPipelineForInpainting

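# # Load the inpainting pipeline once at import time and move it to the target
# # device; inpaint_model_path and torch_device are expected to come from the
# # starred src.util imports above.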
# inpaint_pipe = AutoPipelineForInpainting.from_pretrained(inpaint_model_path).to(torch_device)
# # inpaint_pipe = AutoPipelineForInpainting.from_pipe(pipe).to(torch_device)

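# # Gradio handler: takes the sketch-tool payload ({"image": ..., "mask": ...}),
# # runs the inpainting pipeline, and returns the result plus a zip of outputs.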
# @spaces.GPU(enable_queue=True)
# def inpaint(image_dict, num_inference_steps, seed, prompt="", progress=gr.Progress()):
#     progress(0)
#     # PIL's Image.resize expects (width, height), so the dimensions are passed
#     # in that order; the parameter is renamed to avoid shadowing the dict builtin.
#     mask = image_dict["mask"].convert("RGB").resize((imageWidth, imageHeight))
#     init_image = image_dict["image"].convert("RGB").resize((imageWidth, imageHeight))
#     output = inpaint_pipe(
#         prompt=prompt,
#         image=init_image,
#         mask_image=mask,
#         guidance_scale=guidance_scale,
#         num_inference_steps=num_inference_steps,
#         generator=torch.Generator().manual_seed(seed),
#     )
#     progress(1)

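#     # Bundle the result and mask with the run metadata into a downloadable
#     # zip via the export_as_zip helper imported above.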
#     fname = "inpainting"
#     tab_config = {
#         "Tab": "Inpainting",
#         "Prompt": prompt,
#         "Number of Inference Steps per Image": num_inference_steps,
#         "Seed": seed,
#     }

#     imgs_list = [
#         (output.images[0], "Inpainted Image"),
#         (mask, "Mask"),
#     ]

#     export_as_zip(imgs_list, fname, tab_config)
#     return output.images[0], f"outputs/{fname}.zip"


# __all__ = ["inpaint"]
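
# # A minimal usage sketch, assuming Gradio 3.x, where gr.Image(tool="sketch")
# # returns an {"image": ..., "mask": ...} dict matching the handler above; the
# # component choices and defaults are illustrative assumptions, not taken from
# # this repo.
# if __name__ == "__main__":
#     demo = gr.Interface(
#         fn=inpaint,
#         inputs=[
#             gr.Image(tool="sketch", type="pil"),  # {"image": ..., "mask": ...}
#             gr.Slider(1, 100, value=25, step=1, label="Inference Steps"),
#             gr.Number(value=0, precision=0, label="Seed"),
#             gr.Textbox(label="Prompt"),
#         ],
#         outputs=[gr.Image(label="Inpainted Image"), gr.File(label="Download")],
#     )
#     demo.launch()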