davidvgilmore committed
Commit b56a409 · verified · 1 Parent(s): 58801f5

Upload hy3dgen/shapegen/models/hunyuan3ddit.py with huggingface_hub

hy3dgen/shapegen/models/hunyuan3ddit.py ADDED
@@ -0,0 +1,390 @@
# Open Source Model Licensed under the Apache License Version 2.0
# and Other Licenses of the Third-Party Components therein:
# The below Model in this distribution may have been modified by THL A29 Limited
# ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2024 THL A29 Limited.

# Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
# The below software and/or models in this distribution may have been
# modified by THL A29 Limited ("Tencent Modifications").
# All Tencent Modifications are Copyright (C) THL A29 Limited.

# Hunyuan 3D is licensed under the TENCENT HUNYUAN NON-COMMERCIAL LICENSE AGREEMENT
# except for the third-party components listed below.
# Hunyuan 3D does not impose any additional limitations beyond what is outlined
# in the respective licenses of these third-party components.
# Users must comply with all terms and conditions of original licenses of these third-party
# components and must ensure that the usage of the third party components adheres to
# all relevant laws and regulations.

# For avoidance of doubts, Hunyuan 3D means the large language models and
# their software and algorithms, including trained model weights, parameters (including
# optimizer states), machine-learning model code, inference-enabling code, training-enabling code,
# fine-tuning enabling code and other elements of the foregoing made publicly available
# by Tencent in accordance with TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT.

import math
from dataclasses import dataclass
from typing import List, Tuple, Optional

import torch
from einops import rearrange
from torch import Tensor, nn


def attention(q: Tensor, k: Tensor, v: Tensor, **kwargs) -> Tensor:
    # Extra keyword arguments (including the `pe` positional encoding the blocks
    # below pass in) are accepted but ignored: this helper is plain scaled
    # dot-product attention with no rotary embedding applied.
    x = torch.nn.functional.scaled_dot_product_attention(q, k, v)
    x = rearrange(x, "B H L D -> B L (H D)")
    return x


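# A minimal shape check for `attention` (illustrative editorial addition, not
# part of the original file): inputs are (batch, heads, tokens, head_dim) and
# the output folds heads back into the channel dimension.
#
#   q = k = v = torch.randn(2, 8, 16, 64)
#   out = attention(q, k, v)   # -> (2, 16, 8 * 64) == (2, 16, 512)
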
def timestep_embedding(t: Tensor, dim, max_period=10000, time_factor: float = 1000.0):
    """
    Create sinusoidal timestep embeddings.
    :param t: a 1-D Tensor of N indices, one per batch element.
              These may be fractional.
    :param dim: the dimension of the output.
    :param max_period: controls the minimum frequency of the embeddings.
    :param time_factor: multiplier applied to t before embedding.
    :return: an (N, D) Tensor of positional embeddings.
    """
    t = time_factor * t
    half = dim // 2
    freqs = torch.exp(-math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half).to(
        t.device
    )

    args = t[:, None].float() * freqs[None]
    embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:
        embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
    if torch.is_floating_point(t):
        embedding = embedding.to(t)
    return embedding


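# Usage sketch (illustrative editorial addition, not part of the original
# file): diffusion timesteps are scaled by `time_factor` and embedded into
# `dim` sinusoidal channels.
#
#   t = torch.tensor([0.0, 0.5, 1.0])
#   emb = timestep_embedding(t, dim=256)   # -> (3, 256)
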
class MLPEmbedder(nn.Module):
    def __init__(self, in_dim: int, hidden_dim: int):
        super().__init__()
        self.in_layer = nn.Linear(in_dim, hidden_dim, bias=True)
        self.silu = nn.SiLU()
        self.out_layer = nn.Linear(hidden_dim, hidden_dim, bias=True)

    def forward(self, x: Tensor) -> Tensor:
        return self.out_layer(self.silu(self.in_layer(x)))


class RMSNorm(torch.nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.scale = nn.Parameter(torch.ones(dim))

    def forward(self, x: Tensor):
        x_dtype = x.dtype
        # compute the RMS statistic in float32 for stability, then cast back
        x = x.float()
        rrms = torch.rsqrt(torch.mean(x ** 2, dim=-1, keepdim=True) + 1e-6)
        return (x * rrms).to(dtype=x_dtype) * self.scale


class QKNorm(torch.nn.Module):
    def __init__(self, dim: int):
        super().__init__()
        self.query_norm = RMSNorm(dim)
        self.key_norm = RMSNorm(dim)

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tuple[Tensor, Tensor]:
        q = self.query_norm(q)
        k = self.key_norm(k)
        # v is only used to pick the dtype/device the normalized q and k are cast to
        return q.to(v), k.to(v)


class SelfAttention(nn.Module):
    def __init__(
        self,
        dim: int,
        num_heads: int = 8,
        qkv_bias: bool = False,
    ):
        super().__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.norm = QKNorm(head_dim)
        self.proj = nn.Linear(dim, dim)

    def forward(self, x: Tensor, pe: Tensor) -> Tensor:
        qkv = self.qkv(x)
        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)
        x = attention(q, k, v, pe=pe)
        x = self.proj(x)
        return x


@dataclass
class ModulationOut:
    shift: Tensor
    scale: Tensor
    gate: Tensor


class Modulation(nn.Module):
    def __init__(self, dim: int, double: bool):
        super().__init__()
        self.is_double = double
        self.multiplier = 6 if double else 3
        self.lin = nn.Linear(dim, self.multiplier * dim, bias=True)

    def forward(self, vec: Tensor) -> Tuple[ModulationOut, Optional[ModulationOut]]:
        out = self.lin(nn.functional.silu(vec))[:, None, :]
        out = out.chunk(self.multiplier, dim=-1)

        return (
            ModulationOut(*out[:3]),
            ModulationOut(*out[3:]) if self.is_double else None,
        )


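# How the modulation is consumed (illustrative editorial addition, not part of
# the original file; `layer_norm` and `sublayer` are placeholder names): each
# ModulationOut drives an adaLN-style affine transform plus a gated residual,
# exactly as the blocks below do inline.
#
#   mod, _ = Modulation(dim=1024, double=False)(vec)   # vec: (B, 1024)
#   h = (1 + mod.scale) * layer_norm(x) + mod.shift    # shift/scale: (B, 1, 1024)
#   x = x + mod.gate * sublayer(h)                     # gated residual update
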
class DoubleStreamBlock(nn.Module):
    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        mlp_ratio: float,
        qkv_bias: bool = False,
    ):
        super().__init__()
        mlp_hidden_dim = int(hidden_size * mlp_ratio)
        self.num_heads = num_heads
        self.hidden_size = hidden_size
        self.img_mod = Modulation(hidden_size, double=True)
        self.img_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.img_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.img_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            nn.GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

        self.txt_mod = Modulation(hidden_size, double=True)
        self.txt_norm1 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_attn = SelfAttention(dim=hidden_size, num_heads=num_heads, qkv_bias=qkv_bias)

        self.txt_norm2 = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.txt_mlp = nn.Sequential(
            nn.Linear(hidden_size, mlp_hidden_dim, bias=True),
            nn.GELU(approximate="tanh"),
            nn.Linear(mlp_hidden_dim, hidden_size, bias=True),
        )

    def forward(self, img: Tensor, txt: Tensor, vec: Tensor, pe: Tensor) -> Tuple[Tensor, Tensor]:
        img_mod1, img_mod2 = self.img_mod(vec)
        txt_mod1, txt_mod2 = self.txt_mod(vec)

        # prepare the image/latent stream for attention: modulated pre-norm, then qkv
        img_modulated = self.img_norm1(img)
        img_modulated = (1 + img_mod1.scale) * img_modulated + img_mod1.shift
        img_qkv = self.img_attn.qkv(img_modulated)
        img_q, img_k, img_v = rearrange(img_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        img_q, img_k = self.img_attn.norm(img_q, img_k, img_v)

        # prepare the text/condition stream the same way
        txt_modulated = self.txt_norm1(txt)
        txt_modulated = (1 + txt_mod1.scale) * txt_modulated + txt_mod1.shift
        txt_qkv = self.txt_attn.qkv(txt_modulated)
        txt_q, txt_k, txt_v = rearrange(txt_qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        txt_q, txt_k = self.txt_attn.norm(txt_q, txt_k, txt_v)

        # run joint attention over the concatenated sequences (text tokens first)
        q = torch.cat((txt_q, img_q), dim=2)
        k = torch.cat((txt_k, img_k), dim=2)
        v = torch.cat((txt_v, img_v), dim=2)

        attn = attention(q, k, v, pe=pe)
        txt_attn, img_attn = attn[:, : txt.shape[1]], attn[:, txt.shape[1]:]

        # gated residual updates for the image stream: attention, then MLP
        img = img + img_mod1.gate * self.img_attn.proj(img_attn)
        img = img + img_mod2.gate * self.img_mlp((1 + img_mod2.scale) * self.img_norm2(img) + img_mod2.shift)

        # and likewise for the text stream
        txt = txt + txt_mod1.gate * self.txt_attn.proj(txt_attn)
        txt = txt + txt_mod2.gate * self.txt_mlp((1 + txt_mod2.scale) * self.txt_norm2(txt) + txt_mod2.shift)
        return img, txt


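# Dual-stream usage sketch (illustrative editorial addition, not part of the
# original file): the image/latent and text/condition streams keep separate
# weights but attend jointly, and both come back updated.
#
#   block = DoubleStreamBlock(hidden_size=1024, num_heads=16, mlp_ratio=4.0, qkv_bias=True)
#   img, txt = block(img=torch.randn(2, 512, 1024),
#                    txt=torch.randn(2, 257, 1024),
#                    vec=torch.randn(2, 1024), pe=None)
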
class SingleStreamBlock(nn.Module):
    """
    A DiT block with parallel linear layers as described in
    https://arxiv.org/abs/2302.05442 and adapted modulation interface.
    """

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        mlp_ratio: float = 4.0,
        qk_scale: Optional[float] = None,
    ):
        super().__init__()

        self.hidden_dim = hidden_size
        self.num_heads = num_heads
        head_dim = hidden_size // num_heads
        # kept for API compatibility; the attention helper above relies on
        # PyTorch's default scaling, so this value is not consumed in forward()
        self.scale = qk_scale or head_dim ** -0.5

        self.mlp_hidden_dim = int(hidden_size * mlp_ratio)
        # qkv and mlp_in fused into one projection
        self.linear1 = nn.Linear(hidden_size, hidden_size * 3 + self.mlp_hidden_dim)
        # proj and mlp_out fused into one projection
        self.linear2 = nn.Linear(hidden_size + self.mlp_hidden_dim, hidden_size)

        self.norm = QKNorm(head_dim)

        self.hidden_size = hidden_size
        self.pre_norm = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)

        self.mlp_act = nn.GELU(approximate="tanh")
        self.modulation = Modulation(hidden_size, double=False)

    def forward(self, x: Tensor, vec: Tensor, pe: Tensor) -> Tensor:
        mod, _ = self.modulation(vec)

        x_mod = (1 + mod.scale) * self.pre_norm(x) + mod.shift
        qkv, mlp = torch.split(self.linear1(x_mod), [3 * self.hidden_size, self.mlp_hidden_dim], dim=-1)

        q, k, v = rearrange(qkv, "B L (K H D) -> K B H L D", K=3, H=self.num_heads)
        q, k = self.norm(q, k, v)

        # compute attention
        attn = attention(q, k, v, pe=pe)
        # compute activation in mlp stream, cat again and run second linear layer
        output = self.linear2(torch.cat((attn, self.mlp_act(mlp)), 2))
        return x + mod.gate * output


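# Design note (editorial addition, not part of the original file): fusing qkv
# with mlp_in in `linear1`, and proj with mlp_out in `linear2`, lets the
# attention and MLP branches run in parallel off one matmul each, per the
# parallel-block design of https://arxiv.org/abs/2302.05442.
#
#   block = SingleStreamBlock(hidden_size=1024, num_heads=16)
#   x = torch.randn(2, 80, 1024)    # (batch, tokens, hidden)
#   vec = torch.randn(2, 1024)      # conditioning vector
#   out = block(x, vec, pe=None)    # -> (2, 80, 1024)
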
class LastLayer(nn.Module):
    def __init__(self, hidden_size: int, patch_size: int, out_channels: int):
        super().__init__()
        self.norm_final = nn.LayerNorm(hidden_size, elementwise_affine=False, eps=1e-6)
        self.linear = nn.Linear(hidden_size, patch_size * patch_size * out_channels, bias=True)
        self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(hidden_size, 2 * hidden_size, bias=True))

    def forward(self, x: Tensor, vec: Tensor) -> Tensor:
        shift, scale = self.adaLN_modulation(vec).chunk(2, dim=1)
        x = (1 + scale[:, None, :]) * self.norm_final(x) + shift[:, None, :]
        x = self.linear(x)
        return x


class Hunyuan3DDiT(nn.Module):
    def __init__(
        self,
        in_channels: int = 64,
        context_in_dim: int = 1536,
        hidden_size: int = 1024,
        mlp_ratio: float = 4.0,
        num_heads: int = 16,
        depth: int = 16,
        depth_single_blocks: int = 32,
        axes_dim: List[int] = [64],
        theta: int = 10_000,
        qkv_bias: bool = True,
        time_factor: float = 1000,
        ckpt_path: Optional[str] = None,
        **kwargs,
    ):
        super().__init__()
        self.in_channels = in_channels
        self.context_in_dim = context_in_dim
        self.hidden_size = hidden_size
        self.mlp_ratio = mlp_ratio
        self.num_heads = num_heads
        self.depth = depth
        self.depth_single_blocks = depth_single_blocks
        self.axes_dim = axes_dim
        self.theta = theta
        self.qkv_bias = qkv_bias
        self.time_factor = time_factor
        self.out_channels = self.in_channels

        if hidden_size % num_heads != 0:
            raise ValueError(
                f"Hidden size {hidden_size} must be divisible by num_heads {num_heads}"
            )
        pe_dim = hidden_size // num_heads
        if sum(axes_dim) != pe_dim:
            raise ValueError(f"Got {axes_dim} but expected positional dim {pe_dim}")
        self.latent_in = nn.Linear(self.in_channels, self.hidden_size, bias=True)
        self.time_in = MLPEmbedder(in_dim=256, hidden_dim=self.hidden_size)
        self.cond_in = nn.Linear(context_in_dim, self.hidden_size)

        self.double_blocks = nn.ModuleList(
            [
                DoubleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                )
                for _ in range(depth)
            ]
        )

        self.single_blocks = nn.ModuleList(
            [
                SingleStreamBlock(
                    self.hidden_size,
                    self.num_heads,
                    mlp_ratio=mlp_ratio,
                )
                for _ in range(depth_single_blocks)
            ]
        )

        self.final_layer = LastLayer(self.hidden_size, 1, self.out_channels)

        if ckpt_path is not None:
            print('restored denoiser ckpt', ckpt_path)

            ckpt = torch.load(ckpt_path, map_location="cpu")
            if 'state_dict' not in ckpt:
                # deepspeed ckpt: strip the deepspeed wrapper prefix from every key
                state_dict = {}
                for k in ckpt.keys():
                    new_k = k.replace('_forward_module.', '')
                    state_dict[new_k] = ckpt[k]
            else:
                state_dict = ckpt["state_dict"]

            # drop the 'model.' prefix if the checkpoint was saved from a wrapper
            final_state_dict = {}
            for k, v in state_dict.items():
                if k.startswith('model.'):
                    final_state_dict[k.replace('model.', '')] = v
                else:
                    final_state_dict[k] = v
            missing, unexpected = self.load_state_dict(final_state_dict, strict=False)
            print('unexpected keys:', unexpected)
            print('missing keys:', missing)

    def forward(
        self,
        x,
        t,
        contexts,
        **kwargs,
    ) -> Tensor:
        cond = contexts['main']
        latent = self.latent_in(x)
        # note: self.time_factor is passed positionally into the max_period slot
        # of timestep_embedding, so the t-scaling factor stays at its default of 1000
        vec = self.time_in(timestep_embedding(t, 256, self.time_factor).to(dtype=latent.dtype))
        cond = self.cond_in(cond)
        pe = None

        for block in self.double_blocks:
            latent, cond = block(img=latent, txt=cond, vec=vec, pe=pe)

        # single-stream blocks operate on the concatenated (condition, latent) sequence
        latent = torch.cat((cond, latent), 1)
        for block in self.single_blocks:
            latent = block(latent, vec=vec, pe=pe)

        # keep only the latent tokens and project back to the output channels
        latent = latent[:, cond.shape[1]:, ...]
        latent = self.final_layer(latent, vec)
        return latent
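
# Smoke-test sketch (editorial addition, not part of the original file): shapes
# follow the constructor defaults above — latents carry in_channels=64 features,
# the conditioning context carries context_in_dim=1536 features, and timesteps
# are one scalar per batch element; token counts here are arbitrary.
if __name__ == "__main__":
    model = Hunyuan3DDiT()                           # default config: 16 double + 32 single blocks
    x = torch.randn(2, 512, 64)                      # (batch, latent tokens, in_channels)
    t = torch.rand(2)                                # diffusion timesteps in [0, 1]
    contexts = {'main': torch.randn(2, 257, 1536)}   # conditioning tokens
    out = model(x, t, contexts)
    print(out.shape)                                 # torch.Size([2, 512, 64])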