1inkusFace committed on
Commit
2dce68d
·
verified ·
1 Parent(s): a11c486

Update app.py

Browse files
Files changed (1)
  1. app.py +13 -10
app.py CHANGED
@@ -81,7 +81,10 @@ pipe = StableDiffusion3Pipeline.from_pretrained(
81
  #torch_dtype=torch.bfloat16,
82
  #use_safetensors=False,
83
  )
84
-
 
 
 
85
  pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/UltraReal.safetensors")
86
 
87
  pipe.to(device=device, dtype=torch.bfloat16)
@@ -105,9 +108,9 @@ def infer_30(
105
  num_inference_steps,
106
  progress=gr.Progress(track_tqdm=True),
107
  ):
108
- pipe.text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
109
- pipe.text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
110
- pipe.text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
111
  torch.set_float32_matmul_precision("highest")
112
  seed = random.randint(0, MAX_SEED)
113
  generator = torch.Generator(device='cuda').manual_seed(seed)
@@ -156,9 +159,9 @@ def infer_60(
156
  num_inference_steps,
157
  progress=gr.Progress(track_tqdm=True),
158
  ):
159
- pipe.text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
160
- pipe.text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
161
- pipe.text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
162
  torch.set_float32_matmul_precision("highest")
163
  seed = random.randint(0, MAX_SEED)
164
  generator = torch.Generator(device='cuda').manual_seed(seed)
@@ -205,9 +208,9 @@ def infer_90(
205
  num_inference_steps,
206
  progress=gr.Progress(track_tqdm=True),
207
  ):
208
- pipe.text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
209
- pipe.text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
210
- pipe.text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
211
  torch.set_float32_matmul_precision("highest")
212
  seed = random.randint(0, MAX_SEED)
213
  generator = torch.Generator(device='cuda').manual_seed(seed)
 
81
  #torch_dtype=torch.bfloat16,
82
  #use_safetensors=False,
83
  )
84
+ text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
85
+ text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
86
+ text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
87
+
88
  pipe.load_lora_weights("ford442/sdxl-vae-bf16", weight_name="LoRA/UltraReal.safetensors")
89
 
90
  pipe.to(device=device, dtype=torch.bfloat16)
 
108
  num_inference_steps,
109
  progress=gr.Progress(track_tqdm=True),
110
  ):
111
+ pipe.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
112
+ pipe.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
113
+ pipe.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
114
  torch.set_float32_matmul_precision("highest")
115
  seed = random.randint(0, MAX_SEED)
116
  generator = torch.Generator(device='cuda').manual_seed(seed)
 
159
  num_inference_steps,
160
  progress=gr.Progress(track_tqdm=True),
161
  ):
162
+ pipe.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
163
+ pipe.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
164
+ pipe.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
165
  torch.set_float32_matmul_precision("highest")
166
  seed = random.randint(0, MAX_SEED)
167
  generator = torch.Generator(device='cuda').manual_seed(seed)
 
208
  num_inference_steps,
209
  progress=gr.Progress(track_tqdm=True),
210
  ):
211
+ pipe.text_encoder=text_encoder #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
212
+ pipe.text_encoder_2=text_encoder_2 #CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
213
+ pipe.text_encoder_3=text_encoder_3 #T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
214
  torch.set_float32_matmul_precision("highest")
215
  seed = random.randint(0, MAX_SEED)
216
  generator = torch.Generator(device='cuda').manual_seed(seed)