michaelapplydesign committed
Commit 98eda10 · 1 Parent(s): 19b5412

test x formers

Files changed (1): app.py (+5 -0)
app.py CHANGED
@@ -22,6 +22,8 @@ from diffusers import StableDiffusionUpscalePipeline
 from diffusers import LDMSuperResolutionPipeline
 import cv2
 import onnxruntime
+import xformers
+# from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
 
 def removeFurniture(input_img1,
                     input_img2,
@@ -87,6 +89,9 @@ def upscale(image, prompt):
     # pipe = StableDiffusionLatentUpscalePipeline.from_pretrained("stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16)
     pipe = pipe.to(device)
     pipe.enable_attention_slicing()
+    pipe.enable_xformers_memory_efficient_attention(attention_op=xformers.ops.MemoryEfficientAttentionFlashAttentionOp)
+    # Workaround for not accepting attention shape using VAE for Flash Attention
+    pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)
 
     ret = pipe(prompt=prompt,
                image=image,
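
For context, the two hunks together turn on xFormers memory-efficient attention for the upscale pipeline: the FlashAttention op is applied to the pipeline's attention layers, while the VAE falls back to the default op because its attention shape is not accepted by FlashAttention. Below is a minimal standalone sketch of the same two calls; the checkpoint id and device handling are assumptions, since the diff does not show how app.py builds pipe, and importing MemoryEfficientAttentionFlashAttentionOp directly is equivalent to the commit's xformers.ops reference.

# Sketch only, not part of the commit: the checkpoint id and device setup are
# assumptions; the commit's app.py constructs pipe elsewhere.
import torch
from diffusers import StableDiffusionUpscalePipeline
from xformers.ops import MemoryEfficientAttentionFlashAttentionOp

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler",  # assumed model id
    torch_dtype=torch.float16,
)
pipe = pipe.to(device)
pipe.enable_attention_slicing()

# The same two calls the commit adds: FlashAttention op for the pipeline's
# attention layers, default op (None) for the VAE, whose attention shape
# FlashAttention does not accept.
pipe.enable_xformers_memory_efficient_attention(
    attention_op=MemoryEfficientAttentionFlashAttentionOp
)
pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)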