Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -4,8 +4,8 @@ import numpy as np
|
|
4 |
#import tensorrt as trt
|
5 |
import random
|
6 |
import torch
|
7 |
-
from diffusers import StableDiffusion3Pipeline, AutoencoderKL
|
8 |
-
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
|
9 |
#from threading import Thread
|
10 |
#from transformers import pipeline
|
11 |
from transformers import T5Tokenizer, T5ForConditionalGeneration
|
@@ -63,7 +63,13 @@ checkpoint = "microsoft/Phi-3.5-mini-instruct"
|
|
63 |
#vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #, device_map='cpu') #.to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
64 |
|
65 |
pipe = StableDiffusion3Pipeline.from_pretrained(
|
66 |
-
"
|
|
|
|
|
|
|
|
|
|
|
|
|
67 |
token=True,
|
68 |
use_safetensors=False,
|
69 |
)
|
|
|
4 |
#import tensorrt as trt
|
5 |
import random
|
6 |
import torch
|
7 |
+
from diffusers import StableDiffusion3Pipeline, AutoencoderKL, SD3Transformer2DModel
|
8 |
+
from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer, CLIPTextModelWithProjection, T5EncoderModel
|
9 |
#from threading import Thread
|
10 |
#from transformers import pipeline
|
11 |
from transformers import T5Tokenizer, T5ForConditionalGeneration
|
|
|
63 |
#vaeXL = AutoencoderKL.from_pretrained("stabilityai/sdxl-vae", safety_checker=None, use_safetensors=False) #, device_map='cpu') #.to(torch.bfloat16) #.to(device=device, dtype=torch.bfloat16)
|
64 |
|
65 |
pipe = StableDiffusion3Pipeline.from_pretrained(
|
66 |
+
"stabilityai/stable-diffusion-3.5-large",
|
67 |
+
#"ford442/stable-diffusion-3.5-large-bf16",
|
68 |
+
vae=AutoencoderKL.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='vae',token=True),
|
69 |
+
transformer=SD3Transformer2DModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='transformer',token=True),
|
70 |
+
text_encoder=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder',token=True),
|
71 |
+
text_encoder_2=CLIPTextModelWithProjection.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_2',token=True),
|
72 |
+
text_encoder_3=T5EncoderModel.from_pretrained("ford442/stable-diffusion-3.5-large-bf16", subfolder='text_encoder_3',token=True),
|
73 |
token=True,
|
74 |
use_safetensors=False,
|
75 |
)
|