amos1088 committed on
Commit
a4cc7b2
·
1 Parent(s): e92cda4

test gradio

Browse files
Files changed (2) hide show
  1. app.py +50 -9
  2. app_image_style.py +2 -6
app.py CHANGED
@@ -1,36 +1,72 @@
 
 
 
 
 
1
  import gradio as gr
2
  from huggingface_hub import login
3
  import os
4
- import spaces
 
 
 
 
 
5
  from diffusers import AutoPipelineForText2Image
6
  from diffusers.utils import load_image
7
  import torch
8
- import tempfile
 
 
9
 
10
  token = os.getenv("HF_TOKEN")
11
  login(token=token)
12
 
13
 
14
- pipeline = AutoPipelineForText2Image.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16).to("cuda")
15
- pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
16
 
 
 
 
 
17
 
18
 
19
  @spaces.GPU
20
- def generate_image(prompt, reference_image, controlnet_conditioning_scale):
21
  style_images = [load_image(f.name) for f in reference_image]
22
 
23
  pipeline.set_ip_adapter_scale(controlnet_conditioning_scale)
24
 
25
- image = pipeline(
26
  prompt=prompt,
27
  ip_adapter_image=[style_images],
28
  negative_prompt="",
29
  guidance_scale=5,
30
  num_inference_steps=30,
31
- ).images[0]
 
 
 
32
 
33
- return image
34
 
35
  # Set up Gradio interface
36
  interface = gr.Interface(
@@ -38,8 +74,10 @@ interface = gr.Interface(
38
  inputs=[
39
  gr.Textbox(label="Prompt"),
40
  # gr.Image( type= "filepath",label="Reference Image (Style)"),
41
- gr.File(file_count="multiple",label="Reference Image (Style)"),
42
  gr.Slider(label="Control Net Conditioning Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
 
 
43
  ],
44
  outputs="image",
45
  title="Image Generation with Stable Diffusion 3 medium and ControlNet",
@@ -48,3 +86,6 @@ interface = gr.Interface(
48
  )
49
 
50
  interface.launch()
 
 
 
 
1
+ import torch
2
+ from diffusers.models import MotionAdapter
3
+ from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler
4
+ from diffusers.utils import export_to_gif
5
+
6
  import gradio as gr
7
  from huggingface_hub import login
8
  import os
9
+ import spaces,tempfile
10
+ import torch
11
+ from diffusers import StableDiffusionXLPipeline
12
+ from PIL import Image
13
+ import torch
14
+ from diffusers import AutoPipelineForText2Image, DDIMScheduler
15
  from diffusers import AutoPipelineForText2Image
16
  from diffusers.utils import load_image
17
  import torch
18
+ from diffusers.models import MotionAdapter
19
+ from diffusers import AnimateDiffSDXLPipeline, DDIMScheduler
20
+ from diffusers.utils import export_to_gif
21
 
22
  token = os.getenv("HF_TOKEN")
23
  login(token=token)
24
 
25
 
26
# Motion adapter that extends the SDXL text-to-image model into a video pipeline.
adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-sdxl-beta", torch_dtype=torch.float16)

model_id = "stabilityai/sdxl-turbo"
# DDIM scheduler configured for AnimateDiff: linear betas, linspace timestep
# spacing, no sample clipping, one-step offset.
scheduler = DDIMScheduler.from_pretrained(
    model_id,
    subfolder="scheduler",
    clip_sample=False,
    timestep_spacing="linspace",
    beta_schedule="linear",
    steps_offset=1,
)
pipe = AnimateDiffSDXLPipeline.from_pretrained(
    model_id,
    motion_adapter=adapter,
    scheduler=scheduler,
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
# IP-Adapter adds image-prompt ("style") conditioning on top of the text prompt.
# NOTE(review): the motion adapter is the SDXL-*beta* checkpoint while the base
# model is sdxl-turbo — confirm this pairing actually produces usable motion.
pipe.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin")

# enable memory savings
pipe.enable_vae_slicing()
pipe.enable_vae_tiling()
# Alias kept so generate_image() below (written against `pipeline`) still works.
pipeline = pipe
50
 
51
 
52
@spaces.GPU
def generate_image(prompt, reference_image, controlnet_conditioning_scale, num_frames):
    """Generate an animated GIF from a text prompt and style reference images.

    Args:
        prompt: Text prompt describing the desired animation.
        reference_image: Iterable of uploaded Gradio file objects; each is
            loaded and fed to the IP-Adapter as a style image.
        controlnet_conditioning_scale: IP-Adapter conditioning strength
            (0.0 disables style conditioning, 1.0 is full strength).
        num_frames: Requested number of animation frames. The Gradio slider
            delivers a float (possibly 0), so it is coerced to an int >= 1.

    Returns:
        Filesystem path of the exported GIF.

    Raises:
        gr.Error: If no reference image was uploaded.
    """
    # Fail with a clear UI message instead of crashing inside load_image().
    if not reference_image:
        raise gr.Error("Please upload at least one style reference image.")
    style_images = [load_image(f.name) for f in reference_image]

    pipeline.set_ip_adapter_scale(controlnet_conditioning_scale)

    # Bug fix: the slider hands us a float in [0, 1]; AnimateDiff needs an
    # integer frame count of at least 1.
    frame_count = max(1, int(num_frames))

    output = pipeline(
        prompt=prompt,
        ip_adapter_image=[style_images],
        negative_prompt="",
        guidance_scale=5,
        num_inference_steps=30,
        num_frames=frame_count,
    )
    frames = output.frames[0]

    # Bug fix: write to a unique temp file instead of the shared hard-coded
    # "animation.gif", so concurrent requests don't clobber each other.
    # (`tempfile` is already imported at the top of this file.)
    gif_path = tempfile.NamedTemporaryFile(suffix=".gif", delete=False).name
    export_to_gif(frames, gif_path)

    return gif_path
70
 
71
  # Set up Gradio interface
72
  interface = gr.Interface(
 
74
  inputs=[
75
  gr.Textbox(label="Prompt"),
76
  # gr.Image( type= "filepath",label="Reference Image (Style)"),
77
+ gr.File(type="file",file_count="multiple",label="Reference Image (Style)"),
78
  gr.Slider(label="Control Net Conditioning Scale", minimum=0, maximum=1.0, step=0.1, value=1.0),
79
+ gr.Slider(label="Number of frames", minimum=0, maximum=1.0, step=0.1, value=1.0),
80
+
81
  ],
82
  outputs="image",
83
  title="Image Generation with Stable Diffusion 3 medium and ControlNet",
 
86
  )
87
 
88
  interface.launch()
89
+
90
+
91
+
app_image_style.py CHANGED
@@ -2,14 +2,10 @@ import gradio as gr
2
  from huggingface_hub import login
3
  import os
4
  import spaces
5
- import torch
6
- from diffusers import StableDiffusionXLPipeline
7
- from PIL import Image
8
- import torch
9
- from diffusers import AutoPipelineForText2Image, DDIMScheduler
10
  from diffusers import AutoPipelineForText2Image
11
  from diffusers.utils import load_image
12
  import torch
 
13
 
14
  token = os.getenv("HF_TOKEN")
15
  login(token=token)
@@ -22,7 +18,7 @@ pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name=
22
 
23
  @spaces.GPU
24
  def generate_image(prompt, reference_image, controlnet_conditioning_scale):
25
- style_images = [load_image(f.file.name) for f in reference_image]
26
 
27
  pipeline.set_ip_adapter_scale(controlnet_conditioning_scale)
28
 
 
2
  from huggingface_hub import login
3
  import os
4
  import spaces
 
 
 
 
 
5
  from diffusers import AutoPipelineForText2Image
6
  from diffusers.utils import load_image
7
  import torch
8
+ import tempfile
9
 
10
  token = os.getenv("HF_TOKEN")
11
  login(token=token)
 
18
 
19
  @spaces.GPU
20
  def generate_image(prompt, reference_image, controlnet_conditioning_scale):
21
+ style_images = [load_image(f.name) for f in reference_image]
22
 
23
  pipeline.set_ip_adapter_scale(controlnet_conditioning_scale)
24