primerz committed
Commit 0a1370c · verified · Parent: 3858a52

Update ip_adapter/resampler.py

Files changed (1):
  1. ip_adapter/resampler.py (+42, -80)
ip_adapter/resampler.py CHANGED
@@ -1,31 +1,24 @@
-# modified from https://github.com/mlfoundations/open_flamingo/blob/main/open_flamingo/src/helpers.py
-import math
-
 import torch
 import torch.nn as nn
+import torch.nn.functional as F
+import math
+
+class FeedForward(nn.Module):
+    def __init__(self, dim, mult=4):
+        super().__init__()
+        self.norm = nn.LayerNorm(dim)
+        self.fc1 = nn.Linear(dim, int(dim * mult))
+        self.act = nn.GELU()
+        self.fc2 = nn.Linear(int(dim * mult), dim)
+        nn.init.xavier_uniform_(self.fc1.weight)
+        nn.init.xavier_uniform_(self.fc2.weight)
 
+    def forward(self, x):
+        return x + self.fc2(self.act(self.fc1(self.norm(x))))
 
-# FFN
-def FeedForward(dim, mult=4):
-    inner_dim = int(dim * mult)
-    return nn.Sequential(
-        nn.LayerNorm(dim),
-        nn.Linear(dim, inner_dim, bias=False),
-        nn.GELU(),
-        nn.Linear(inner_dim, dim, bias=False),
-    )
-
-
 def reshape_tensor(x, heads):
-    bs, length, width = x.shape
-    #(bs, length, width) --> (bs, length, n_heads, dim_per_head)
-    x = x.view(bs, length, heads, -1)
-    # (bs, length, n_heads, dim_per_head) --> (bs, n_heads, length, dim_per_head)
-    x = x.transpose(1, 2)
-    # (bs, n_heads, length, dim_per_head) --> (bs*n_heads, length, dim_per_head)
-    x = x.reshape(bs, heads, length, -1)
-    return x
-
+    bs, length, _ = x.shape
+    return x.view(bs, length, heads, -1).transpose(1, 2)
 
 class PerceiverAttention(nn.Module):
     def __init__(self, *, dim, dim_head=64, heads=8):
@@ -38,84 +31,53 @@ class PerceiverAttention(nn.Module):
         self.norm1 = nn.LayerNorm(dim)
         self.norm2 = nn.LayerNorm(dim)
 
-        self.to_q = nn.Linear(dim, inner_dim, bias=False)
-        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
-        self.to_out = nn.Linear(inner_dim, dim, bias=False)
+        self.to_q = nn.Linear(dim, inner_dim)
+        self.to_kv = nn.Linear(dim, inner_dim * 2)
+        self.to_out = nn.Linear(inner_dim, dim)
 
+        nn.init.xavier_uniform_(self.to_q.weight)
+        nn.init.xavier_uniform_(self.to_kv.weight)
+        nn.init.xavier_uniform_(self.to_out.weight)
 
     def forward(self, x, latents):
-        """
-        Args:
-            x (torch.Tensor): image features
-                shape (b, n1, D)
-            latent (torch.Tensor): latent features
-                shape (b, n2, D)
-        """
         x = self.norm1(x)
         latents = self.norm2(latents)
-
-        b, l, _ = latents.shape
 
         q = self.to_q(latents)
        kv_input = torch.cat((x, latents), dim=-2)
         k, v = self.to_kv(kv_input).chunk(2, dim=-1)
-
-        q = reshape_tensor(q, self.heads)
-        k = reshape_tensor(k, self.heads)
-        v = reshape_tensor(v, self.heads)
-
-        # attention
-        scale = 1 / math.sqrt(math.sqrt(self.dim_head))
-        weight = (q * scale) @ (k * scale).transpose(-2, -1)  # More stable with f16 than dividing afterwards
-        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)
-        out = weight @ v
-
-        out = out.permute(0, 2, 1, 3).reshape(b, l, -1)
 
-        return self.to_out(out)
+        q, k, v = map(lambda t: reshape_tensor(t, self.heads), (q, k, v))
 
+        attn_score = (q @ k.transpose(-2, -1)) * self.scale
+        attn_weight = F.softmax(attn_score, dim=-1)
+        out = (attn_weight @ v).transpose(1, 2).reshape(latents.shape)
+
+        return self.to_out(out)
 
 class Resampler(nn.Module):
-    def __init__(
-        self,
-        dim=1024,
-        depth=8,
-        dim_head=64,
-        heads=16,
-        num_queries=8,
-        embedding_dim=768,
-        output_dim=1024,
-        ff_mult=4,
-    ):
+    def __init__(self, dim=1024, depth=8, dim_head=64, heads=16, num_queries=8, embedding_dim=768, output_dim=1024, ff_mult=4):
         super().__init__()
-
-        self.latents = nn.Parameter(torch.randn(1, num_queries, dim) / dim**0.5)
-
-        self.proj_in = nn.Linear(embedding_dim, dim)
+        self.latents = nn.Parameter(torch.empty(1, num_queries, dim))
+        nn.init.normal_(self.latents, mean=0, std=dim**-0.5)
 
+        self.proj_in = nn.Linear(embedding_dim, dim)
         self.proj_out = nn.Linear(dim, output_dim)
         self.norm_out = nn.LayerNorm(output_dim)
-
-        self.layers = nn.ModuleList([])
-        for _ in range(depth):
-            self.layers.append(
-                nn.ModuleList(
-                    [
-                        PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
-                        FeedForward(dim=dim, mult=ff_mult),
-                    ]
-                )
-            )
+
+        self.layers = nn.ModuleList([
+            nn.ModuleList([
+                PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
+                FeedForward(dim=dim, mult=ff_mult),
+            ]) for _ in range(depth)
+        ])
 
     def forward(self, x):
-
         latents = self.latents.repeat(x.size(0), 1, 1)
-
         x = self.proj_in(x)
-
+
         for attn, ff in self.layers:
             latents = attn(x, latents) + latents
             latents = ff(latents) + latents
-
-        latents = self.proj_out(latents)
-        return self.norm_out(latents)
+
+        return self.norm_out(self.proj_out(latents))
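Note on the attention rewrite: the old code scaled q and k separately with scale = 1 / sqrt(sqrt(dim_head)), while the new code multiplies the q·k product once by self.scale. Assuming self.scale equals dim_head ** -0.5 (it is defined in the part of __init__ this hunk does not show, so treat that as an assumption), the two formulations give the same scores up to floating-point error. A quick sketch of that check:

import math
import torch

dim_head = 64
q = torch.randn(2, 16, 8, dim_head)    # (batch, heads, query tokens, dim_head)
k = torch.randn(2, 16, 264, dim_head)  # (batch, heads, key tokens, dim_head)

# Old formulation: scale q and k separately before the matmul.
s = 1 / math.sqrt(math.sqrt(dim_head))
old_scores = (q * s) @ (k * s).transpose(-2, -1)

# New formulation: scale the product once, assuming self.scale == dim_head ** -0.5.
new_scores = (q @ k.transpose(-2, -1)) * dim_head**-0.5

print(torch.allclose(old_scores, new_scores, atol=1e-5))  # True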
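For context, a minimal smoke test of the updated module, assuming the repository root is on PYTHONPATH and that the unchanged part of PerceiverAttention.__init__ still defines self.heads and self.scale as in the original file; the input shape (2, 257, 768) is only an illustrative stand-in for CLIP image-token embeddings:

import torch
from ip_adapter.resampler import Resampler

# Defaults match the diff: dim=1024, depth=8, dim_head=64, heads=16,
# num_queries=8, embedding_dim=768, output_dim=1024.
resampler = Resampler()

# Hypothetical batch of image-token embeddings: (batch, tokens, embedding_dim).
image_embeds = torch.randn(2, 257, 768)

with torch.no_grad():
    tokens = resampler(image_embeds)

print(tokens.shape)  # torch.Size([2, 8, 1024]): num_queries latent tokens per image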