Spaces: Running on Zero
Commit: Update app.py (Browse files)
File: app.py — CHANGED
@@ -105,9 +105,9 @@ def infer_30(
     num_inference_steps,
     progress=gr.Progress(track_tqdm=True),
 ):
-    text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
-    text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
-    text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
     torch.set_float32_matmul_precision("highest")
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
@@ -156,9 +156,9 @@ def infer_60(
     num_inference_steps,
     progress=gr.Progress(track_tqdm=True),
 ):
-    text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
-    text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
-    text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
     torch.set_float32_matmul_precision("highest")
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)
@@ -205,9 +205,9 @@ def infer_90(
     num_inference_steps,
     progress=gr.Progress(track_tqdm=True),
 ):
-    text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
-    text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
-    text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder', token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True).to(device=device, dtype=torch.bfloat16)
+    pipe.text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True).to(device=device, dtype=torch.bfloat16)
     torch.set_float32_matmul_precision("highest")
     seed = random.randint(0, MAX_SEED)
     generator = torch.Generator(device='cuda').manual_seed(seed)