akameswa committed on
Commit
82570e4
·
verified ·
1 Parent(s): 8983d6e

Update src/pipelines/inpainting.py

Browse files
Files changed (1) hide show
  1. src/pipelines/inpainting.py +35 -35
src/pipelines/inpainting.py CHANGED
@@ -1,42 +1,42 @@
1
- import torch
2
- import spaces
3
- import gradio as gr
4
- from src.util.base import *
5
- from src.util.params import *
6
- from diffusers import AutoPipelineForInpainting
7
 
8
- inpaint_pipe = AutoPipelineForInpainting.from_pretrained(inpaint_model_path).to(torch_device)
9
- # inpaint_pipe = AutoPipelineForInpainting.from_pipe(pipe).to(torch_device)
10
 
11
- @spaces.GPU(enable_queue=True)
12
- def inpaint(dict, num_inference_steps, seed, prompt="", progress=gr.Progress()):
13
- progress(0)
14
- mask = dict["mask"].convert("RGB").resize((imageHeight, imageWidth))
15
- init_image = dict["image"].convert("RGB").resize((imageHeight, imageWidth))
16
- output = inpaint_pipe(
17
- prompt=prompt,
18
- image=init_image,
19
- mask_image=mask,
20
- guidance_scale=guidance_scale,
21
- num_inference_steps=num_inference_steps,
22
- generator=torch.Generator().manual_seed(seed),
23
- )
24
- progress(1)
25
 
26
- fname = "inpainting"
27
- tab_config = {
28
- "Tab": "Inpainting",
29
- "Prompt": prompt,
30
- "Number of Inference Steps per Image": num_inference_steps,
31
- "Seed": seed,
32
- }
33
 
34
- imgs_list = []
35
- imgs_list.append((output.images[0], "Inpainted Image"))
36
- imgs_list.append((mask, "Mask"))
37
 
38
- export_as_zip(imgs_list, fname, tab_config)
39
- return output.images[0], f"outputs/{fname}.zip"
40
 
41
 
42
- __all__ = ["inpaint"]
 
1
+ # import torch
2
+ # import spaces
3
+ # import gradio as gr
4
+ # from src.util.base import *
5
+ # from src.util.params import *
6
+ # from diffusers import AutoPipelineForInpainting
7
 
8
+ # inpaint_pipe = AutoPipelineForInpainting.from_pretrained(inpaint_model_path).to(torch_device)
9
+ # # inpaint_pipe = AutoPipelineForInpainting.from_pipe(pipe).to(torch_device)
10
 
11
+ # @spaces.GPU(enable_queue=True)
12
+ # def inpaint(dict, num_inference_steps, seed, prompt="", progress=gr.Progress()):
13
+ # progress(0)
14
+ # mask = dict["mask"].convert("RGB").resize((imageHeight, imageWidth))
15
+ # init_image = dict["image"].convert("RGB").resize((imageHeight, imageWidth))
16
+ # output = inpaint_pipe(
17
+ # prompt=prompt,
18
+ # image=init_image,
19
+ # mask_image=mask,
20
+ # guidance_scale=guidance_scale,
21
+ # num_inference_steps=num_inference_steps,
22
+ # generator=torch.Generator().manual_seed(seed),
23
+ # )
24
+ # progress(1)
25
 
26
+ # fname = "inpainting"
27
+ # tab_config = {
28
+ # "Tab": "Inpainting",
29
+ # "Prompt": prompt,
30
+ # "Number of Inference Steps per Image": num_inference_steps,
31
+ # "Seed": seed,
32
+ # }
33
 
34
+ # imgs_list = []
35
+ # imgs_list.append((output.images[0], "Inpainted Image"))
36
+ # imgs_list.append((mask, "Mask"))
37
 
38
+ # export_as_zip(imgs_list, fname, tab_config)
39
+ # return output.images[0], f"outputs/{fname}.zip"
40
 
41
 
42
+ # __all__ = ["inpaint"]