Update ootd/inference_ootd_dc.py
ootd/inference_ootd_dc.py CHANGED
@@ -25,9 +25,9 @@ from transformers import AutoProcessor, CLIPVisionModelWithProjection
 from transformers import CLIPTextModel, CLIPTokenizer
 
 VIT_PATH = "openai/clip-vit-large-patch14"
-VAE_PATH = "levihsu/
-UNET_PATH = "levihsu/
-MODEL_PATH = "levihsu/
+VAE_PATH = "levihsu/ootd"
+UNET_PATH = "levihsu/ootd"
+MODEL_PATH = "levihsu/ootd"
 
 class OOTDiffusionDC:
 
@@ -42,13 +42,13 @@ class OOTDiffusionDC:
 
         unet_garm = UNetGarm2DConditionModel.from_pretrained(
             UNET_PATH,
-            subfolder="unet_garm",
+            subfolder="ootd_dc/checkpoint-36000/unet_garm",
             torch_dtype=torch.float16,
             use_safetensors=True,
         )
         unet_vton = UNetVton2DConditionModel.from_pretrained(
             UNET_PATH,
-            subfolder="unet_vton",
+            subfolder="ootd_dc/checkpoint-36000/unet_vton",
             torch_dtype=torch.float16,
             use_safetensors=True,
        )
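
The change works because from_pretrained resolves subfolder relative to the root of the Hub repo, so the two UNet loaders now look under ootd_dc/checkpoint-36000/ instead of the repo's top level. Below is a minimal sketch, not part of the commit, that sanity-checks the new paths before the heavyweight loads run; it takes "levihsu/ootd" and the checkpoint layout from the diff above and assumes the standard diffusers layout in which each component directory carries a config.json next to its weights.

# Sanity-check sketch (assumed layout, not from the commit): verify that
# the new subfolder paths exist in the Hub repo before loading the models.
from huggingface_hub import hf_hub_download

UNET_PATH = "levihsu/ootd"

for subfolder in (
    "ootd_dc/checkpoint-36000/unet_garm",
    "ootd_dc/checkpoint-36000/unet_vton",
):
    # hf_hub_download raises an error if the file is missing, which is an
    # easier failure to read than a crash mid-way through pipeline setup.
    config_path = hf_hub_download(UNET_PATH, filename=f"{subfolder}/config.json")
    print(f"ok: {config_path}")

Note that this commit only touches the dress-code variant; the matching half-body pipeline (inference_ootd_hd.py) would presumably need the analogous ootd_hd/... subfolders.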