1inkusFace committed on
Commit
0fc2367
·
verified ·
1 Parent(s): 5e4f5fd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -0
app.py CHANGED
@@ -69,8 +69,11 @@ pipe = StableDiffusion3Pipeline.from_pretrained(
69
  vae=None,
70
  #vae=AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-fp32", use_safetensors=True, subfolder='vae',token=True),
71
  #scheduler = FlowMatchHeunDiscreteScheduler.from_pretrained('ford442/stable-diffusion-3.5-large-bf16', subfolder='scheduler',token=True),
 
72
  # text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True),
 
73
  # text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True),
 
74
  # text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True),
75
  #tokenizer=CLIPTokenizer.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", add_prefix_space=True, subfolder="tokenizer", token=True),
76
  #tokenizer_2=CLIPTokenizer.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", add_prefix_space=True, subfolder="tokenizer_2", token=True),
@@ -102,6 +105,9 @@ def infer_30(
102
  num_inference_steps,
103
  progress=gr.Progress(track_tqdm=True),
104
  ):
 
 
 
105
  torch.set_float32_matmul_precision("highest")
106
  seed = random.randint(0, MAX_SEED)
107
  generator = torch.Generator(device='cuda').manual_seed(seed)
@@ -150,6 +156,9 @@ def infer_60(
150
  num_inference_steps,
151
  progress=gr.Progress(track_tqdm=True),
152
  ):
 
 
 
153
  torch.set_float32_matmul_precision("highest")
154
  seed = random.randint(0, MAX_SEED)
155
  generator = torch.Generator(device='cuda').manual_seed(seed)
@@ -196,6 +205,9 @@ def infer_90(
196
  num_inference_steps,
197
  progress=gr.Progress(track_tqdm=True),
198
  ):
 
 
 
199
  torch.set_float32_matmul_precision("highest")
200
  seed = random.randint(0, MAX_SEED)
201
  generator = torch.Generator(device='cuda').manual_seed(seed)
 
69
  vae=None,
70
  #vae=AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-fp32", use_safetensors=True, subfolder='vae',token=True),
71
  #scheduler = FlowMatchHeunDiscreteScheduler.from_pretrained('ford442/stable-diffusion-3.5-large-bf16', subfolder='scheduler',token=True),
72
+ text_encoder=None #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True),
73
  # text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True),
74
+ text_encoder_2=None #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True),
75
  # text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True),
76
+ text_encoder_3=None #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True),
77
  # text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True),
78
  #tokenizer=CLIPTokenizer.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", add_prefix_space=True, subfolder="tokenizer", token=True),
79
  #tokenizer_2=CLIPTokenizer.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", add_prefix_space=True, subfolder="tokenizer_2", token=True),
 
105
  num_inference_steps,
106
  progress=gr.Progress(track_tqdm=True),
107
  ):
108
+ text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
109
+ text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
110
+ text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
111
  torch.set_float32_matmul_precision("highest")
112
  seed = random.randint(0, MAX_SEED)
113
  generator = torch.Generator(device='cuda').manual_seed(seed)
 
156
  num_inference_steps,
157
  progress=gr.Progress(track_tqdm=True),
158
  ):
159
+ text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
160
+ text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
161
+ text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
162
  torch.set_float32_matmul_precision("highest")
163
  seed = random.randint(0, MAX_SEED)
164
  generator = torch.Generator(device='cuda').manual_seed(seed)
 
205
  num_inference_steps,
206
  progress=gr.Progress(track_tqdm=True),
207
  ):
208
+ text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
209
+ text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
210
+ text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
211
  torch.set_float32_matmul_precision("highest")
212
  seed = random.randint(0, MAX_SEED)
213
  generator = torch.Generator(device='cuda').manual_seed(seed)