update
src/model_processing.py  +12 -5

@@ -11,7 +11,7 @@ def download_ckpt_yaml(model_path, model_name, ckpt_path, yaml_url=None):
     with open(save_path, 'wb') as f:
         f.write(response.content)
 
-
+    os.makedirs(model_path, exist_ok=True)
     local_dir = os.path.join(model_path, model_name)
     os.makedirs(local_dir, exist_ok=True)
 
@@ -34,6 +34,7 @@ def download_ckpt_yaml(model_path, model_name, ckpt_path, yaml_url=None):
 
 def get_model(model_path, model_name):
     model = None
+    huggingface_token = os.environ.get("HUGGINGFACE_TOKEN", None)
     data_params = {
         "target_image_size": (512, 512),
         "lock_ratio": True,
@@ -253,7 +254,10 @@ def get_model(model_path, model_name):
         else:
             raise Exception(f"Unsupported model: {model_name}")
 
-        model = TiTok.from_pretrained(
+        model = TiTok.from_pretrained(
+            ckpt_path,
+            token=huggingface_token
+        )
         data_params = {
             "target_image_size": (256, 256),
             "lock_ratio": True,
@@ -268,7 +272,8 @@ def get_model(model_path, model_name):
         import types
 
         model = MultiModalityCausalLM.from_pretrained(
-            "deepseek-ai/Janus-Pro-7B", trust_remote_code=True
+            "deepseek-ai/Janus-Pro-7B", trust_remote_code=True,
+            token=huggingface_token
         ).gen_vision_model
         model.forward = types.MethodType(forward, model)
         data_params = {
@@ -355,7 +360,8 @@ def get_model(model_path, model_name):
         import types
 
         model = AutoencoderKL.from_pretrained(
-            "huaweilin/stable-diffusion-3.5-large-vae", subfolder="vae"
+            "huaweilin/stable-diffusion-3.5-large-vae", subfolder="vae",
+            token=huggingface_token
         )
         model.forward = types.MethodType(forward, model)
         data_params = {
@@ -372,7 +378,8 @@ def get_model(model_path, model_name):
         import types
 
         model = AutoencoderKL.from_pretrained(
-            "black-forest-labs/FLUX.1-dev", subfolder="vae"
+            "black-forest-labs/FLUX.1-dev", subfolder="vae",
+            token=huggingface_token
         )
         model.forward = types.MethodType(forward, model)
         data_params = {
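
For context, a minimal sketch of the pattern this commit applies throughout get_model: read an optional Hugging Face token from the environment and forward it to from_pretrained so that gated repositories (for example black-forest-labs/FLUX.1-dev) can be downloaded. The repo id, the subfolder, and the AutoencoderKL class mirror the hunks above; everything else in the snippet, including the diffusers import, is illustrative and not code from this repository.

import os
from diffusers import AutoencoderKL

# Optional credential: os.environ.get returns None when the variable is
# unset, so public checkpoints keep loading without a token, while gated
# repos need HUGGINGFACE_TOKEN exported with an authorized value.
huggingface_token = os.environ.get("HUGGINGFACE_TOKEN", None)

vae = AutoencoderKL.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    subfolder="vae",
    token=huggingface_token,
)

Because the token is read once at the top of get_model, every branch can pass the same variable, which is presumably why the change threads token=huggingface_token through each from_pretrained call instead of configuring authentication globally.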