amos1088 committed on
Commit
9b01735
·
1 Parent(s): bcbf6e0

test gradio

Browse files
Files changed (1) hide show
  1. app.py +7 -4
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  import torch
3
- from diffusers import StableDiffusion3ControlNetPipeline, SD3ControlNetModel, UniPCMultistepScheduler
 
4
  from huggingface_hub import login
5
  import os
6
  import spaces
@@ -23,9 +24,11 @@ login(token=token)
23
  # pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
24
  # pipe.to("cuda", torch.float16)
25
 
26
- controlnet = SD3ControlNetModel.from_pretrained("alimama-creative/SD3-Controlnet-Softedge", torch_dtype=torch.float16)
27
 
28
- pipe = StableDiffusion3ControlNetPipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet)
 
 
29
  pipe.to("cuda", torch.float16)
30
 
31
 
@@ -35,7 +38,7 @@ def generate_image(prompt, reference_image, controlnet_conditioning_scale):
35
  # Generate the image with ControlNet conditioning
36
  generated_image = pipe(
37
  prompt=prompt,
38
- control_image=load_image(reference_image),
39
  controlnet_conditioning_scale=controlnet_conditioning_scale,
40
  ).images[0]
41
  return generated_image
 
1
  import gradio as gr
2
  import torch
3
+ # from diffusers import StableDiffusion3ControlNetPipeline, SD3ControlNetModel, UniPCMultistepScheduler
4
+ from diffusers import AutoPipelineForText2Image
5
  from huggingface_hub import login
6
  import os
7
  import spaces
 
24
  # pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
25
  # pipe.to("cuda", torch.float16)
26
 
27
+ # controlnet = SD3ControlNetModel.from_pretrained("alimama-creative/SD3-Controlnet-Softedge", torch_dtype=torch.float16)
28
 
29
+ # pipe = StableDiffusion3ControlNetPipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", controlnet=controlnet)
30
+ pipe = AutoPipelineForText2Image.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers")
31
+ pipe.load_ip_adapter("TencentARC/T2I-Adapter", subfolder="models", weight_name="coadapter-style-sd15v1.pth")
32
  pipe.to("cuda", torch.float16)
33
 
34
 
 
38
  # Generate the image with ControlNet conditioning
39
  generated_image = pipe(
40
  prompt=prompt,
41
+ ip_adapter_image=load_image(reference_image),
42
  controlnet_conditioning_scale=controlnet_conditioning_scale,
43
  ).images[0]
44
  return generated_image