Tech-Meld committed on
Commit
4abbcc0
·
verified ·
1 Parent(s): 8605a7d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +13 -3
app.py CHANGED
@@ -3,6 +3,8 @@ from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDisc
3
  from huggingface_hub import hf_hub_download
4
  from safetensors.torch import load_file
5
  import gradio as gr
 
 
6
 
7
# Model locations: SDXL base weights plus the ByteDance Lightning repo that
# hosts the distilled 4-step UNet checkpoint.
base = "stabilityai/stable-diffusion-xl-base-1.0"
repo = "ByteDance/SDXL-Lightning"

# Instantiate a UNet from the base model's config, then overwrite its weights
# with the Lightning checkpoint downloaded from the Hub (everything on CPU).
unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cpu")
unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cpu"))

# Assemble the full SDXL pipeline around the swapped-in UNet.
pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float32).to("cpu")

# Lightning checkpoints are trained with "trailing" timestep spacing, so the
# sampler must be reconfigured to match.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
17
def generate_images(prompt, num_inference_steps, guidance_scale, batch_size):
    """Generate `batch_size` images for `prompt` with the SDXL pipeline.

    Parameters mirror the Gradio inputs; returns a list of PIL images
    suitable for a `gr.Gallery`.
    """
    # StableDiffusionXLPipeline.__call__ has no `batch_size` parameter —
    # passing it raises TypeError. The supported way to get several images
    # per prompt is `num_images_per_prompt`. Sliders may deliver floats, so
    # cast the integer-valued arguments explicitly.
    images = pipe(
        prompt,
        num_inference_steps=int(num_inference_steps),
        guidance_scale=guidance_scale,
        num_images_per_prompt=int(batch_size),
    ).images
    return images
20
 
21
  # Define Gradio interface
 
 
 
 
 
22
# Build the Gradio front end: four inputs feed generate_images and the
# resulting images are displayed in a gallery.
_inputs = [
    gr.Textbox(label="Prompt"),
    gr.Slider(label="Num Inference Steps", minimum=1, maximum=50, step=1, value=4),
    gr.Slider(label="Guidance Scale", minimum=0, maximum=20, step=0.1, value=0),
    gr.Slider(label="Batch Size", minimum=1, maximum=8, step=1, value=2),
]

iface = gr.Interface(
    fn=generate_images,
    inputs=_inputs,
    outputs=gr.Gallery(label="Generated Images"),
    title="SDXL Lightning 4-Step Inference (CPU)",
    description="Generate images with Stable Diffusion XL Lightning 4-Step model on CPU.",
)

iface.launch()
 
3
  from huggingface_hub import hf_hub_download
4
  from safetensors.torch import load_file
5
  import gradio as gr
6
+ from tqdm.auto import tqdm
7
+ import psutil
8
 
9
# Where the weights live: the SDXL base model and the Lightning repo with the
# distilled 4-step UNet checkpoint.
base = "stabilityai/stable-diffusion-xl-base-1.0"
repo = "ByteDance/SDXL-Lightning"

# Create the UNet from the base config and load the Lightning weights on CPU.
unet = UNet2DConditionModel.from_config(base, subfolder="unet").to("cpu")
unet.load_state_dict(load_file(hf_hub_download(repo, ckpt), device="cpu"))
pipe = StableDiffusionXLPipeline.from_pretrained(base, unet=unet, torch_dtype=torch.float32).to("cpu")

# Ensure the sampler uses "trailing" timestep spacing, which the Lightning
# distilled checkpoints require.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
20
 
21
def generate_images(prompt, num_inference_steps, guidance_scale, batch_size):
    """Generate `batch_size` images for `prompt`, with a tqdm step counter.

    Parameters mirror the Gradio inputs; returns a list of PIL images
    suitable for a `gr.Gallery`.
    """
    with tqdm(total=int(num_inference_steps), desc="Inference Progress") as pbar:
        # StableDiffusionXLPipeline.__call__ accepts neither `progress_bar=`
        # nor `batch_size=` — both would raise TypeError. Advance the tqdm bar
        # from the pipeline's per-step callback instead, and request multiple
        # images via `num_images_per_prompt`.
        def _on_step_end(pipeline, step, timestep, callback_kwargs):
            pbar.update(1)
            return callback_kwargs

        images = pipe(
            prompt,
            num_inference_steps=int(num_inference_steps),
            guidance_scale=guidance_scale,
            num_images_per_prompt=int(batch_size),
            callback_on_step_end=_on_step_end,
        ).images
    return images
25
 
26
  # Define Gradio interface
27
def get_cpu_info():
    """Return a short human-readable CPU / available-memory summary."""
    # psutil.cpu_freq() returns an scpufreq(current, min, max) namedtuple —
    # it has no `.brand` attribute, so the original line raised
    # AttributeError. Use the stdlib `platform` module for the CPU name.
    import platform

    cpu_name = platform.processor() or platform.machine()
    memory_available = psutil.virtual_memory().available // 1024 // 1024
    return f"CPU: {cpu_name}, Memory: {memory_available} MB"
31
+
32
# Define the Gradio interface. Two fixes relative to the committed version:
# * `generate_images` takes four arguments, so four inputs must be wired up —
#   the "Num Inference Steps" slider is restored (4 steps is the Lightning
#   checkpoint's intended setting);
# * `gr.Interface` has no `extra_info` parameter (it raised TypeError); the
#   CPU summary is rendered via the supported `article` argument instead.
iface = gr.Interface(
    fn=generate_images,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(label="Num Inference Steps", minimum=1, maximum=50, step=1, value=4),
        gr.Slider(label="Guidance Scale", minimum=0, maximum=20, step=0.1, value=0),
        gr.Slider(label="Batch Size", minimum=1, maximum=4, step=1, value=1),
    ],
    outputs=gr.Gallery(label="Generated Images"),
    title="SDXL Lightning 4-Step Inference (CPU)",
    description="Generate images with Stable Diffusion XL Lightning 4-Step model on CPU.",
    article=get_cpu_info(),
)

iface.launch()