amos1088 committed on
Commit
ae33459
·
1 Parent(s): 0d4222a

test gradio

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
# app.py — set up a Stable Diffusion XL adapter pipeline on a Hugging Face
# Space. (This span was a garbled diff view; normalized here to the
# post-commit state of the file, formatted conventionally.)
from huggingface_hub import login
import os
import spaces
from diffusers.schedulers import UniPCMultistepScheduler
from diffusers import (
    StableDiffusionXLAdapterPipeline,
    T2IAdapter,
    EulerAncestralDiscreteScheduler,
    AutoencoderKL,
    StableDiffusionXLPipeline,
)
from diffusers.utils import load_image, make_image_grid
import torch

# Authenticate to the Hugging Face Hub using the Space's HF_TOKEN secret.
token = os.getenv("HF_TOKEN")
login(token=token)

model_id = 'stabilityai/sdxl-turbo'

# BUG FIX: the committed code called the class constructor directly,
# `StableDiffusionXLPipeline(model_id)`, which raises a TypeError because the
# constructor expects model components, not a repo-id string. Pipelines are
# loaded from a repo id with the `from_pretrained` classmethod — the idiom the
# rest of this file already uses.
x = StableDiffusionXLPipeline.from_pretrained(model_id)

euler_a = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
# fp16-safe VAE replacement for the stock SDXL VAE.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix")

# Load adapter weights into the helper pipeline.
# NOTE(review): `t2iadapter_style_sd14v1.pth` is an SD-1.4 T2I-Adapter being
# loaded through `load_ip_adapter` on an SDXL pipeline — these weights are
# likely incompatible with SDXL; confirm the intended adapter/checkpoint.
x.load_ip_adapter(
    pretrained_model_name_or_path_or_dict="TencentARC/T2I-Adapter",
    subfolder="models",
    weight_name="t2iadapter_style_sd14v1.pth",
)

# NOTE(review): StableDiffusionXLPipeline does not expose an `adapter`
# attribute, so `x.adapter` below is expected to raise AttributeError at
# runtime — verify what object is meant to be passed here (a T2IAdapter
# instance loaded via T2IAdapter.from_pretrained is the usual pattern).
pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
    model_id, vae=vae, adapter=x.adapter, scheduler=euler_a, variant="fp16",
)
pipe.enable_xformers_memory_efficient_attention()
pipe.to("cuda", torch.float16)