fffiloni committed on
Commit
4cda365
·
1 Parent(s): 7b5957f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -7
app.py CHANGED
@@ -17,7 +17,7 @@ using our implementation of the RAFT model. We will also see how to convert the
17
  predicted flows to RGB images for visualization.
18
  """
19
 
20
- from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
21
  from diffusers import UniPCMultistepScheduler
22
 
23
  import cv2
@@ -48,7 +48,7 @@ high_threshold = 200
48
 
49
  # Models
50
  controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
51
- pipe = StableDiffusionControlNetPipeline.from_pretrained(
52
  "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
53
  )
54
  pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
@@ -73,11 +73,12 @@ def get_canny_filter(image):
73
  return canny_image
74
 
75
 
76
- def generate_images(prompt, canny_image):
77
 
78
  output = pipe(
79
- prompt,
80
- canny_image,
 
81
  generator=generator,
82
  num_images_per_prompt=1,
83
  num_inference_steps=20,
@@ -128,7 +129,7 @@ def infer():
128
 
129
  pil2diff_img = Image.open("./basket1.jpg")
130
  canny_image = get_canny_filter(pil2diff_img)
131
- diffused_img = generate_images(prompt, canny_image)
132
  print(f"DIFFUSED IMG: {diffused_img[1]}")
133
 
134
  diffused_img[1].save("diffused_input1.jpg")
@@ -275,7 +276,7 @@ def infer():
275
 
276
  pil2diff_blend = Image.open("blended2.jpg")
277
  canny_image = get_canny_filter(pil2diff_blend)
278
- diffused_blend = generate_images(prompt, canny_image)
279
  print(f"DIFFUSED IMG: {diffused_blend[1]}")
280
 
281
  diffused_blend[1].save("diffused_blended_2.jpg")
 
17
  predicted flows to RGB images for visualization.
18
  """
19
 
20
+ from diffusers import DiffusionPipeline, ControlNetModel
21
  from diffusers import UniPCMultistepScheduler
22
 
23
  import cv2
 
48
 
49
  # Models
50
  controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
51
+ pipe = DiffusionPipeline.from_pretrained(
52
  "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
53
  )
54
  pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 
73
  return canny_image
74
 
75
 
76
+ def generate_images(prompt, canny_image, image):
77
 
78
  output = pipe(
79
+ #prompt,
80
+ controlnet_conditioning_image=canny_image,
81
+ image = image,
82
  generator=generator,
83
  num_images_per_prompt=1,
84
  num_inference_steps=20,
 
129
 
130
  pil2diff_img = Image.open("./basket1.jpg")
131
  canny_image = get_canny_filter(pil2diff_img)
132
+ diffused_img = generate_images(prompt, canny_image, pil2diff_img)
133
  print(f"DIFFUSED IMG: {diffused_img[1]}")
134
 
135
  diffused_img[1].save("diffused_input1.jpg")
 
276
 
277
  pil2diff_blend = Image.open("blended2.jpg")
278
  canny_image = get_canny_filter(pil2diff_blend)
279
+ diffused_blend = generate_images(prompt, canny_image, pil2diff_blend)
280
  print(f"DIFFUSED IMG: {diffused_blend[1]}")
281
 
282
  diffused_blend[1].save("diffused_blended_2.jpg")