yonishafir committed
Commit feee439 · verified · 1 Parent(s): 9566feb

Update README.md

Files changed (1): README.md (+2 -5)
README.md CHANGED
@@ -109,7 +109,6 @@ def get_masked_image(image, image_mask, width, height):
 image_transforms = transforms.Compose(
     [
         transforms.ToTensor(),
-        # transforms.Normalize([0.5], [0.5]),
     ]
 )
 
@@ -125,7 +124,6 @@ mask_image = mask_image.convert("L")
 
 width, height = init_image.size
 
-
 # Load, init model
 controlnet = ControlNetModel().from_config('briaai/DEV-ControlNetInpaintingFast', torch_dtype=torch.float16)
 controlnet.controlnet_cond_embedding = ControlNetConditioningEmbedding(
@@ -133,7 +131,7 @@ controlnet.controlnet_cond_embedding = ControlNetConditioningEmbedding(
     conditioning_channels = 5
 )
 
-# controlnet.load_state_dict(torch.load('briaai/DEV-ControlNetInpaintingFast'))
+controlnet = ControlNetModel().from_pretrained("briaai/DEV-ControlNetInpaintingFast", torch_dtype=torch.float16)
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 pipe = StableDiffusionXLControlNetPipeline.from_pretrained("briaai/BRIA-2.3", controlnet=controlnet.to(dtype=torch.float16), torch_dtype=torch.float16, vae=vae) #force_zeros_for_empty_prompt=False, # vae=vae)
 
@@ -146,6 +144,7 @@ pipe.enable_xformers_memory_efficient_attention()
 
 generator = torch.Generator(device='cuda:0').manual_seed(123456)
 
+
 vae = pipe.vae
 
 masked_image, image_mask, masked_image_to_present = get_masked_image(init_image, mask_image, width, height)
@@ -153,7 +152,6 @@ masked_image_tensor = image_transforms(masked_image)
 masked_image_tensor = (masked_image_tensor - 0.5) / 0.5
 
 masked_image_tensor = masked_image_tensor.unsqueeze(0).to(device="cuda")
-# masked_image_tensor = masked_image_tensor.permute((0,3,1,2))
 control_latents = vae.encode(
     masked_image_tensor[:, :3, :, :].to(vae.dtype)
 ).latent_dist.sample()
@@ -185,6 +183,5 @@ gen_img = pipe(negative_prompt=default_negative_prompt, prompt=prompt,
     generator=generator).images[0]
 
 
-gen_img.save("./a_park_bench.png")
 ```
 
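The substantive change in this commit is the model-loading step: rather than instantiating the ControlNet from a config and loading a state dict by hand (the previously commented-out `load_state_dict` line), the README now pulls the weights directly with `from_pretrained`. Below is a minimal sketch of the resulting loading sequence; note that `from_pretrained` is a classmethod, so the idiomatic call is `ControlNetModel.from_pretrained(...)` rather than the README's `ControlNetModel().from_pretrained(...)`, and it assumes the published checkpoint already contains the 5-channel conditioning embedding, which would make the `from_config` construction above it redundant:

```python
# Sketch of the post-commit loading path (assumes the published checkpoint
# already carries the 5-channel conditioning embedding).
import torch
from diffusers import AutoencoderKL, ControlNetModel, StableDiffusionXLControlNetPipeline

controlnet = ControlNetModel.from_pretrained(
    "briaai/DEV-ControlNetInpaintingFast", torch_dtype=torch.float16
)
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "briaai/BRIA-2.3", controlnet=controlnet, vae=vae, torch_dtype=torch.float16
).to("cuda")
```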
 
 
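The first hunk also drops the commented-out `transforms.Normalize([0.5], [0.5])` from the `Compose`; the README instead rescales by hand with `(masked_image_tensor - 0.5) / 0.5` further down. For a `ToTensor()` output in [0, 1] the two are interchangeable, as this small self-check with a dummy tensor shows:

```python
# Equivalence check: Normalize([0.5], [0.5]) and the manual rescale both map
# [0, 1] to [-1, 1]; a single mean/std broadcasts across all three channels.
import torch
from torchvision import transforms

x = torch.rand(3, 8, 8)                               # stand-in for ToTensor() output
via_normalize = transforms.Normalize([0.5], [0.5])(x)
via_manual = (x - 0.5) / 0.5                          # what the README does inline
assert torch.allclose(via_normalize, via_manual)
```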
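Finally, a note on `conditioning_channels = 5`, which the diff leaves unexplained: the `vae.encode` call produces 4 latent channels from the masked image, so the fifth channel is plausibly the inpainting mask resized to latent resolution. The commit does not confirm this, so the following shape sketch is an inference, not documentation:

```python
# Hypothetical assembly of the 5-channel conditioning tensor (inferred from
# conditioning_channels = 5 and the vae.encode call; not stated in the commit).
import torch

control_latents = torch.randn(1, 4, 128, 128)   # stand-in for vae.encode(...).latent_dist.sample()
mask_lowres = torch.zeros(1, 1, 128, 128)       # mask at latent resolution
control_cond = torch.cat([control_latents, mask_lowres], dim=1)
print(control_cond.shape)                       # torch.Size([1, 5, 128, 128])
```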