Tech-Meld committed on
Commit
3df52b6
·
verified ·
1 Parent(s): 27832bc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -4
app.py CHANGED
@@ -2,14 +2,29 @@ import gradio as gr
2
  import numpy as np
3
  import random
4
  import torch
5
- from diffusers import StableDiffusion3Pipeline, SD3Transformer2DModel, FlowMatchEulerDiscreteScheduler
 
6
  import spaces
7
 
8
  device = "cuda" if torch.cuda.is_available() else "cpu"
9
  dtype = torch.float16
10
 
11
- repo = "stabilityai/stable-cascade"
12
- pipe = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16).to(device)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
 
14
  MAX_SEED = np.iinfo(np.int32).max
15
  MAX_IMAGE_SIZE = 1344
@@ -22,6 +37,7 @@ def infer(prompts, negative_prompts, seeds, randomize_seeds, widths, heights, gu
22
 
23
  generator = torch.Generator().manual_seed(seeds[i])
24
 
 
25
  image = pipe(
26
  prompt=prompt,
27
  negative_prompt=negative_prompts[i],
@@ -53,7 +69,7 @@ with gr.Blocks(css=css) as demo:
53
 
54
  with gr.Column(elem_id="col-container"):
55
  gr.Markdown(f"""
56
- # Demo [Automated Stable Diffusion 3 Medium](https://huggingface.co/stabilityai/stable-diffusion-3-medium)
57
  """)
58
 
59
  with gr.Row():
 
2
  import numpy as np
3
  import random
4
  import torch
5
+ from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler, AutoencoderKL, UNet2DConditionModel
6
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
7
  import spaces
8
 
9
  device = "cuda" if torch.cuda.is_available() else "cpu"
10
  dtype = torch.float16
11
 
12
+ # NOTE: sdxl-turbo is a distilled (turbo) variant of SDXL, not the base SDXL repo
13
+ repo = "stabilityai/sdxl-turbo" # distilled SDXL-Turbo checkpoint
14
+
15
+ # Load the model components separately
16
+ vae = AutoencoderKL.from_pretrained(repo, subfolder="vae", torch_dtype=torch.float16).to(device)
17
+ text_encoder = SD3Transformer2DModel.from_pretrained(repo, subfolder="text_encoder", torch_dtype=torch.float16).to(device)
18
+ unet = UNet2DConditionModel.from_pretrained(repo, subfolder="unet", torch_dtype=torch.float16).to(device)
19
+ scheduler = EulerDiscreteScheduler.from_pretrained(repo, subfolder="scheduler", torch_dtype=torch.float16)
20
+
21
+ # Assemble the pipeline from the individually loaded components
22
+ pipe = StableDiffusionPipeline(
23
+ vae=vae,
24
+ text_encoder=text_encoder,
25
+ unet=unet,
26
+ scheduler=scheduler
27
+ ).to(device)
28
 
29
  MAX_SEED = np.iinfo(np.int32).max
30
  MAX_IMAGE_SIZE = 1344
 
37
 
38
  generator = torch.Generator().manual_seed(seeds[i])
39
 
40
+ # SDXL requires a slightly different call format:
41
  image = pipe(
42
  prompt=prompt,
43
  negative_prompt=negative_prompts[i],
 
69
 
70
  with gr.Column(elem_id="col-container"):
71
  gr.Markdown(f"""
72
+ # Demo [Automated Stable Diffusion XL](https://huggingface.co/stabilityai/sdxl-turbo)
73
  """)
74
 
75
  with gr.Row():