Update app.py
app.py CHANGED
@@ -12,16 +12,18 @@ import numpy as np
 import spaces
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
+dtype = torch.bfloat16
 
 def load_models(model_path="MeissonFlow/Meissonic",
                 transformer_path="MeissonFlow/Muddit"):
     model = SymmetricTransformer2DModel.from_pretrained(
         transformer_path,
         subfolder="1024/transformer",
+        torch_dtype=dtype)
     )
-    vq_model = VQModel.from_pretrained(model_path, subfolder="vqvae")
-    text_encoder = CLIPTextModelWithProjection.from_pretrained(model_path, subfolder="text_encoder")
-    tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer")
+    vq_model = VQModel.from_pretrained(model_path, subfolder="vqvae",torch_dtype=dtype)
+    text_encoder = CLIPTextModelWithProjection.from_pretrained(model_path, subfolder="text_encoder",torch_dtype=dtype)
+    tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer",torch_dtype=dtype)
     scheduler = Scheduler.from_pretrained(model_path, subfolder="scheduler")
 
     pipe = UnifiedPipeline(
@@ -65,7 +67,7 @@ def image_to_text(image, prompt, resolution=1024, steps=64, cfg=9.0):
         width=resolution,
         guidance_scale=cfg,
         num_inference_steps=steps,
-        mask_token_embedding="
+        mask_token_embedding="./mask_token_embedding.pth",
         generator=torch.manual_seed(42),
     )
 
@@ -87,7 +89,7 @@ def text_to_image(prompt, negative_prompt, num_images=1, resolution=1024, steps=
         width=resolution,
         guidance_scale=cfg,
         num_inference_steps=steps,
-        mask_token_embedding="
+        mask_token_embedding="./mask_token_embedding.pth",
         generator=torch.manual_seed(42),
     )
 
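For reference, the first hunk loads the transformer, VQ-VAE, and text encoder in bfloat16 by passing torch_dtype to each from_pretrained call. The sketch below restates that loading pattern under a few assumptions: SymmetricTransformer2DModel, Scheduler, and UnifiedPipeline are this repo's own modules and are assumed to already be imported in app.py; the stray closing parenthesis left after torch_dtype=dtype) in the committed hunk is treated as unintended; and the UnifiedPipeline assembly, which this commit does not touch, is omitted.

import torch
from diffusers import VQModel
from transformers import CLIPTextModelWithProjection, CLIPTokenizer

device = 'cuda' if torch.cuda.is_available() else 'cpu'
dtype = torch.bfloat16  # load weights in half precision

def load_models(model_path="MeissonFlow/Meissonic",
                transformer_path="MeissonFlow/Muddit"):
    # Muddit transformer weights live under the "1024/transformer" subfolder
    model = SymmetricTransformer2DModel.from_pretrained(
        transformer_path,
        subfolder="1024/transformer",
        torch_dtype=dtype,
    )
    vq_model = VQModel.from_pretrained(model_path, subfolder="vqvae", torch_dtype=dtype)
    text_encoder = CLIPTextModelWithProjection.from_pretrained(model_path, subfolder="text_encoder", torch_dtype=dtype)
    # a tokenizer carries no weights, so torch_dtype is dropped here even though the commit passes it
    tokenizer = CLIPTokenizer.from_pretrained(model_path, subfolder="tokenizer")
    scheduler = Scheduler.from_pretrained(model_path, subfolder="scheduler")
    return model, vq_model, text_encoder, tokenizer, scheduler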
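The second and third hunks point mask_token_embedding at a local checkpoint for both the image-to-text and text-to-image paths. With the change applied, each generation call reads roughly as below; the leading arguments are assumptions, since the diff only shows the trailing keywords.

output = pipe(
    prompt=prompt,                  # assumed: not visible in the diff
    height=resolution,              # assumed: only width is visible in the diff
    width=resolution,
    guidance_scale=cfg,
    num_inference_steps=steps,
    mask_token_embedding="./mask_token_embedding.pth",  # relative path, presumably a file in the Space repo
    generator=torch.manual_seed(42),
)

Because the generator is seeded with a fixed value (42), both tabs produce deterministic samples for a given prompt and settings.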