Upload folder using huggingface_hub

main/matryoshka.py CHANGED (+5, -74)
@@ -80,7 +80,6 @@ from diffusers.utils import (
     USE_PEFT_BACKEND,
     BaseOutput,
     deprecate,
-    is_torch_version,
     is_torch_xla_available,
     logging,
     replace_example_docstring,
@@ -869,23 +868,7 @@ class CrossAttnDownBlock2D(nn.Module):
 
         for i, (resnet, attn) in enumerate(blocks):
             if torch.is_grad_enabled() and self.gradient_checkpointing:
-
-                def create_custom_forward(module, return_dict=None):
-                    def custom_forward(*inputs):
-                        if return_dict is not None:
-                            return module(*inputs, return_dict=return_dict)
-                        else:
-                            return module(*inputs)
-
-                    return custom_forward
-
-                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
-                hidden_states = torch.utils.checkpoint.checkpoint(
-                    create_custom_forward(resnet),
-                    hidden_states,
-                    temb,
-                    **ckpt_kwargs,
-                )
+                hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
                 hidden_states = attn(
                     hidden_states,
                     encoder_hidden_states=encoder_hidden_states,
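Every call site in this commit makes the same swap: the hand-rolled create_custom_forward closure, the is_torch_version guard (hence the import dropped in the first hunk), and the explicit torch.utils.checkpoint.checkpoint call are collapsed into the _gradient_checkpointing_func helper that newer diffusers ModelMixin releases attach to checkpointable modules. A minimal sketch of what that helper amounts to, assuming it is essentially a partial application of the non-reentrant PyTorch checkpoint API; ResnetLike is a hypothetical stand-in module, not part of the pipeline:

    from functools import partial

    import torch
    import torch.nn as nn

    # Assumed stand-in for the helper diffusers installs when gradient
    # checkpointing is enabled: checkpoint the module call with the
    # non-reentrant backend, so activations are recomputed during
    # backward instead of being stored.
    _gradient_checkpointing_func = partial(torch.utils.checkpoint.checkpoint, use_reentrant=False)

    class ResnetLike(nn.Module):  # hypothetical stand-in for a resnet block
        def __init__(self):
            super().__init__()
            self.proj = nn.Linear(8, 8)

        def forward(self, hidden_states, temb):
            return self.proj(hidden_states) + temb

    resnet = ResnetLike()
    hidden_states = torch.randn(2, 8, requires_grad=True)
    temb = torch.randn(2, 8)

    # Mirrors the new call site: the module and its positional args go in
    # directly, with no wrapper closure and no version-dependent ckpt_kwargs.
    hidden_states = _gradient_checkpointing_func(resnet, hidden_states, temb)
    hidden_states.sum().backward()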
@@ -1030,17 +1013,6 @@ class UNetMidBlock2DCrossAttn(nn.Module):
         hidden_states = self.resnets[0](hidden_states, temb)
         for attn, resnet in zip(self.attentions, self.resnets[1:]):
             if torch.is_grad_enabled() and self.gradient_checkpointing:
-
-                def create_custom_forward(module, return_dict=None):
-                    def custom_forward(*inputs):
-                        if return_dict is not None:
-                            return module(*inputs, return_dict=return_dict)
-                        else:
-                            return module(*inputs)
-
-                    return custom_forward
-
-                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                 hidden_states = attn(
                     hidden_states,
                     encoder_hidden_states=encoder_hidden_states,
@@ -1049,12 +1021,7 @@ class UNetMidBlock2DCrossAttn(nn.Module):
                     encoder_attention_mask=encoder_attention_mask,
                     return_dict=False,
                 )[0]
-                hidden_states = torch.utils.checkpoint.checkpoint(
-                    create_custom_forward(resnet),
-                    hidden_states,
-                    temb,
-                    **ckpt_kwargs,
-                )
+                hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
             else:
                 hidden_states = attn(
                     hidden_states,
@@ -1192,23 +1159,7 @@ class CrossAttnUpBlock2D(nn.Module):
             hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)
 
             if torch.is_grad_enabled() and self.gradient_checkpointing:
-
-                def create_custom_forward(module, return_dict=None):
-                    def custom_forward(*inputs):
-                        if return_dict is not None:
-                            return module(*inputs, return_dict=return_dict)
-                        else:
-                            return module(*inputs)
-
-                    return custom_forward
-
-                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
-                hidden_states = torch.utils.checkpoint.checkpoint(
-                    create_custom_forward(resnet),
-                    hidden_states,
-                    temb,
-                    **ckpt_kwargs,
-                )
+                hidden_states = self._gradient_checkpointing_func(resnet, hidden_states, temb)
                 hidden_states = attn(
                     hidden_states,
                     encoder_hidden_states=encoder_hidden_states,
@@ -1282,10 +1233,6 @@ class MatryoshkaTransformer2DModel(LegacyModelMixin, LegacyConfigMixin):
             ]
         )
 
-    def _set_gradient_checkpointing(self, module, value=False):
-        if hasattr(module, "gradient_checkpointing"):
-            module.gradient_checkpointing = value
-
     def forward(
         self,
         hidden_states: torch.Tensor,
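The _set_gradient_checkpointing override removed here (and again on the UNet below) is no longer needed because current diffusers toggles the flag generically from the base model class. Roughly, the traversal looks like the following sketch; the function name and signature are illustrative, not the exact diffusers internals:

    import torch.nn as nn

    def set_gradient_checkpointing(model: nn.Module, enable: bool = True) -> None:
        # Walk every submodule and flip the flag wherever a block defines it,
        # which is what the per-model overrides used to do one class at a time.
        for module in model.modules():
            if hasattr(module, "gradient_checkpointing"):
                module.gradient_checkpointing = enable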
@@ -1365,19 +1312,8 @@ class MatryoshkaTransformer2DModel(LegacyModelMixin, LegacyConfigMixin):
         # Blocks
         for block in self.transformer_blocks:
             if torch.is_grad_enabled() and self.gradient_checkpointing:
-
-                def create_custom_forward(module, return_dict=None):
-                    def custom_forward(*inputs):
-                        if return_dict is not None:
-                            return module(*inputs, return_dict=return_dict)
-                        else:
-                            return module(*inputs)
-
-                    return custom_forward
-
-                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
-                hidden_states = torch.utils.checkpoint.checkpoint(
-                    create_custom_forward(block),
+                hidden_states = self._gradient_checkpointing_func(
+                    block,
                     hidden_states,
                     attention_mask,
                     encoder_hidden_states,
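Note that the transformer call site threads several positional arguments through the helper, including ones that may be None (the masks) or plain Python objects (cross_attention_kwargs). The non-reentrant checkpoint API passes non-tensor arguments through untouched and only tracks tensors for recomputation, which is why this works without the old closure. A small self-contained sketch with a hypothetical block:

    import torch
    import torch.nn as nn
    from torch.utils.checkpoint import checkpoint

    class CondBlock(nn.Module):  # hypothetical stand-in for a transformer block
        def __init__(self):
            super().__init__()
            self.proj = nn.Linear(8, 8)

        def forward(self, hidden_states, attention_mask, scale):
            out = self.proj(hidden_states) * scale  # scale is a plain float
            if attention_mask is not None:
                out = out * attention_mask
            return out

    block = CondBlock()
    x = torch.randn(2, 8, requires_grad=True)

    # None and float arguments flow through checkpoint unchanged; only tensor
    # inputs participate in the recompute-on-backward bookkeeping.
    y = checkpoint(block, x, None, 0.5, use_reentrant=False)
    y.sum().backward()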
@@ -1385,7 +1321,6 @@ class MatryoshkaTransformer2DModel(LegacyModelMixin, LegacyConfigMixin):
                     timestep,
                     cross_attention_kwargs,
                     class_labels,
-                    **ckpt_kwargs,
                 )
             else:
                 hidden_states = block(
@@ -2724,10 +2659,6 @@ class MatryoshkaUNet2DConditionModel(
         for module in self.children():
             fn_recursive_set_attention_slice(module, reversed_slice_size)
 
-    def _set_gradient_checkpointing(self, module, value=False):
-        if hasattr(module, "gradient_checkpointing"):
-            module.gradient_checkpointing = value
-
     def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
         r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
 
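Nothing changes for users of the pipeline: checkpointing is still switched on through the public ModelMixin API, which after this commit also provides the _gradient_checkpointing_func the call sites above rely on. A hedged usage sketch; it assumes unet is a constructed MatryoshkaUNet2DConditionModel whose forward returns an output with a .sample field, as the standard UNet2DConditionModel does:

    def training_step(unet, sample, timestep, encoder_hidden_states):
        # Public diffusers API: trades extra forward compute for lower
        # activation memory; only active while gradients are enabled,
        # matching the torch.is_grad_enabled() guards above.
        unet.enable_gradient_checkpointing()
        pred = unet(sample, timestep, encoder_hidden_states=encoder_hidden_states).sample
        return pred.float().pow(2).mean()  # placeholder loss for the sketch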