amos1088 committed
Commit b12bc82 · 1 Parent(s): 1ac2f7d

test gradio

Files changed (1)
  1. app.py +14 -3
app.py CHANGED
@@ -1,12 +1,12 @@
  import gradio as gr
  import torch
  # from diffusers import StableDiffusion3ControlNetPipeline, SD3ControlNetModel, UniPCMultistepScheduler
- from diffusers import StableDiffusionXLPipeline
+ from diffusers import StableDiffusionXLPipeline, T2IAdapter
  from huggingface_hub import login
  import os
  import spaces
  from diffusers.utils import load_image, make_image_grid
- import torch
+ from diffusers import StableDiffusionXLAdapterPipeline, T2IAdapter, EulerAncestralDiscreteScheduler, AutoencoderKL

  token = os.getenv("HF_TOKEN")
  login(token=token)
@@ -27,7 +27,18 @@ login(token=token)
  # controlnet = SD3ControlNetModel.from_pretrained("alimama-creative/SD3-Controlnet-Softedge", torch_dtype=torch.float16)

  # pipe = StableDiffusion3ControlNetPipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet)
- pipe = StableDiffusionXLPipeline.from_pretrained("stabilityai/sdxl-turbo")
+ adapter = T2IAdapter.from_pretrained(
+     "TencentARC/t2i-adapter-lineart-sdxl-1.0", torch_dtype=torch.float16, variant="fp16"
+ )
+
+ model_id = 'stabilityai/stable-diffusion-xl-base-1.0'
+ euler_a = EulerAncestralDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
+ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+ pipe = StableDiffusionXLAdapterPipeline.from_pretrained(
+     model_id, vae=vae, adapter=adapter, scheduler=euler_a, torch_dtype=torch.float16, variant="fp16",
+ ).to("cuda")
+ pipe.enable_xformers_memory_efficient_attention()
+
  pipe.load_ip_adapter("TencentARC/T2I-Adapter", subfolder="models", weight_name="coadapter-style-sd15v1.pth")
  pipe.to("cuda", torch.float16)
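
The hunks above only change the model setup at the top of app.py; the Gradio handler that actually drives the new StableDiffusionXLAdapterPipeline is outside this diff. Below is a minimal sketch of how the pipeline configured in this commit is typically called from a spaces.GPU handler, assuming a text prompt plus a line-art control image as inputs. The function name generate, the demo interface, the URL-based image input, and every default value are illustrative assumptions, not part of commit b12bc82.

import gradio as gr
import spaces
from diffusers.utils import load_image

@spaces.GPU
def generate(prompt, lineart_url, steps=30, adapter_scale=0.8):
    # Hypothetical handler (not part of this commit): the lineart adapter
    # expects a line-drawing style control image, so the input is converted
    # to grayscale and back to RGB before conditioning.
    control_image = load_image(lineart_url).convert("L").convert("RGB")
    # `pipe` is the module-level StableDiffusionXLAdapterPipeline set up above.
    result = pipe(
        prompt=prompt,
        image=control_image,
        num_inference_steps=int(steps),
        adapter_conditioning_scale=adapter_scale,
        guidance_scale=7.5,
    )
    return result.images[0]

demo = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Textbox(label="Line-art image URL"),
        gr.Slider(1, 50, value=30, step=1, label="Steps"),
        gr.Slider(0.0, 1.0, value=0.8, label="Adapter conditioning scale"),
    ],
    outputs=gr.Image(label="Result"),
)

if __name__ == "__main__":
    demo.launch()

Wrapping the inference call in a spaces.GPU-decorated function keeps the heavyweight pipeline call on the Space's GPU worker, which matches the spaces import already present in app.py.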