kadirnar committed
Commit 9879d7c · 1 Parent(s): 2a7d466

Update diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py

diffusion_webui/diffusion_models/controlnet/controlnet_inpaint/controlnet_inpaint_canny.py CHANGED
@@ -2,7 +2,8 @@ import cv2
 import gradio as gr
 import numpy as np
 import torch
-from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+from diffusers import ControlNetModel
+from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
 from PIL import Image
 
 from diffusion_webui.utils.model_list import (
@@ -26,7 +27,7 @@ class StableDiffusionControlNetInpaintCannyGenerator:
         controlnet = ControlNetModel.from_pretrained(
             controlnet_model_path, torch_dtype=torch.float16
         )
-        self.pipe = StableDiffusionControlNetPipeline.from_pretrained(
+        self.pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
             pretrained_model_name_or_path=stable_model_path,
             controlnet=controlnet,
             safety_checker=None,
@@ -68,8 +69,12 @@ class StableDiffusionControlNetInpaintCannyGenerator:
         seed_generator: int,
     ):
 
-        image = self.controlnet_canny_inpaint(image_path=image_path)
-
+        normal_image = image_path["image"].convert("RGB").resize((512, 512))
+        mask_image = image_path["mask"].convert("RGB").resize((512, 512))
+        normal_image = np.array(normal_image)
+        mask_image = np.array(mask_image)
+
+        control_image = self.controlnet_canny_inpaint(image_path=image_path)
         pipe = self.load_model(
             stable_model_path=stable_model_path,
             controlnet_model_path=controlnet_model_path,
@@ -84,7 +89,9 @@ class StableDiffusionControlNetInpaintCannyGenerator:
 
         output = pipe(
             prompt=prompt,
-            image=image,
+            image=normal_image,
+            mask_image=mask_image,
+            control_image=control_image,
             negative_prompt=negative_prompt,
             num_images_per_prompt=num_images_per_prompt,
             num_inference_steps=num_inference_step,
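
For reference, a minimal, self-contained sketch of the call flow this commit switches to, outside the Gradio app: build a Canny edge map as the control image, load the community StableDiffusionControlNetInpaintPipeline shipped in this repo, and pass the base image, the mask, and the edge map as three separate inputs. The checkpoint names, file paths, prompts, and Canny thresholds below are illustrative assumptions, not values taken from this commit; only the image / mask_image / control_image wiring mirrors the diff above.

# Hedged sketch of the new inpaint call flow; model IDs, paths, prompts,
# and Canny thresholds are assumptions, not values from this commit.
import cv2
import numpy as np
import torch
from diffusers import ControlNetModel
from PIL import Image

from diffusion_webui.diffusion_models.controlnet.controlnet_inpaint.pipeline_stable_diffusion_controlnet_inpaint import (
    StableDiffusionControlNetInpaintPipeline,
)

image = Image.open("room.png").convert("RGB").resize((512, 512))
mask = Image.open("room_mask.png").convert("RGB").resize((512, 512))

# Canny edge map of the source image is the ControlNet conditioning input.
edges = cv2.Canny(np.array(image), 100, 200)
control_image = Image.fromarray(np.stack([edges] * 3, axis=-1))

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
)
pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
    pretrained_model_name_or_path="runwayml/stable-diffusion-inpainting",
    controlnet=controlnet,
    safety_checker=None,
    torch_dtype=torch.float16,
).to("cuda")

output = pipe(
    prompt="a red sofa",
    image=np.array(image),        # base image to inpaint
    mask_image=np.array(mask),    # white = region to repaint
    control_image=control_image,  # Canny conditioning
    negative_prompt="low quality",
    num_images_per_prompt=1,
    num_inference_steps=30,
    generator=torch.manual_seed(0),  # generator kwarg assumed, as in standard diffusers pipelines
).images[0]
output.save("result.png")

The motivation for the split is visible in the diff itself: StableDiffusionControlNetPipeline takes only a single image argument (the conditioning image) and has no mask input, so the mask drawn in the UI never reached the model; the inpaint pipeline accepts the base image, the mask, and the control image separately.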