vikho committed
Commit d4b7928 · verified · 1 Parent(s): ac32f32

Upload 13 files

Files changed (13)
  1. README.md +33 -3
  2. bert_padding.py +220 -0
  3. block.py +401 -0
  4. config.json +40 -0
  5. configuration_bert.py +121 -0
  6. convert_v2_weights.py +151 -0
  7. embedding.py +60 -0
  8. jina_clip_handler.py +118 -0
  9. mha.py +821 -0
  10. mlp.py +243 -0
  11. modeling_bert.py +806 -0
  12. modeling_for_glue.py +264 -0
  13. modeling_lora.py +336 -0
README.md CHANGED
@@ -1,3 +1,33 @@
- ---
- license: apache-2.0
- ---
+ # BERT with Flash-Attention
+ ### Installing dependencies
+ To run the model on GPU, you need to install Flash Attention.
+ You can install it either from PyPI (which may not work with fused-dense) or from source.
+ To install from source, clone the GitHub repository:
+ ```console
+ git clone git@github.com:Dao-AILab/flash-attention.git
+ ```
+ The code provided here should work with commit `43950dd`.
+ Change to the cloned repo and install:
+ ```console
+ cd flash-attention && python setup.py install
+ ```
+ This will compile the flash-attention kernel, which will take some time.
+
+ If you would like to use fused MLPs (e.g. for activation checkpointing),
+ you can also install fused-dense from source:
+ ```console
+ cd csrc/fused_dense_lib && python setup.py install
+ ```
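After installation, a quick import check confirms the kernels are available. This is a minimal sketch, not part of this repository; `FusedDense` only imports if the fused-dense extension above was compiled.

```python
# Sanity check after installation (a sketch).
import flash_attn
print(flash_attn.__version__)

# Only available if the fused-dense extension was also compiled:
from flash_attn.ops.fused_dense import FusedDense
```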
+
+ ### Configuration
+ The config adds some new parameters (see the loading sketch after the list):
+ - `use_flash_attn`: If `True`, always use flash attention. If `None`, use flash attention when a GPU is available. If `False`, never use flash attention (works on CPU).
+ - `window_size`: Size (left and right) of the local attention window. If `(-1, -1)`, use global attention.
+ - `dense_seq_output`: If `True`, only the hidden states of the masked-out tokens (around 15%) are passed to the classifier heads. I set this to `True` for pretraining.
+ - `fused_mlp`: Whether to use fused-dense. Useful for reducing VRAM in combination with activation checkpointing.
+ - `mlp_checkpoint_lvl`: One of `{0, 1, 2}`. Increasing this increases the amount of activation checkpointing within the MLP. Keep this at 0 for pretraining and use gradient accumulation instead. For embedding training, increase it as much as needed.
+ - `last_layer_subset`: If `True`, the last layer is only computed for a subset of tokens. I left this set to `False`.
+ - `use_qk_norm`: Whether or not to use QK normalization.
+ - `num_loras`: Number of LoRAs to use when initializing a `BertLoRA` model. Has no effect on other models.
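A minimal loading sketch for these options follows. It assumes the `jinaai/jina-bert-flash-implementation` repository id from `config.json` in this commit, that remote code is trusted, and that keyword overrides are forwarded to the config (standard `transformers` behavior).

```python
# Loading sketch; the repo id comes from config.json in this commit.
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "jinaai/jina-bert-flash-implementation",
    trust_remote_code=True,
    use_flash_attn=None,      # auto: flash attention on GPU, plain attention on CPU
    window_size=(-1, -1),     # global attention
    mlp_checkpoint_lvl=0,     # no activation checkpointing inside the MLP
)
```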
bert_padding.py ADDED
@@ -0,0 +1,220 @@
1
+ # Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
2
+
3
+ """"
4
+ The implementation was further adapted from
5
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0
6
+ """
7
+
8
+ import torch
9
+ import torch.nn.functional as F
10
+ from einops import rearrange, repeat
11
+
12
+
13
+ class IndexFirstAxis(torch.autograd.Function):
14
+ @staticmethod
15
+ def forward(ctx, input, indices):
16
+ ctx.save_for_backward(indices)
17
+ assert input.ndim >= 2
18
+ ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
19
+ second_dim = other_shape.numel()
20
+ # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
21
+ # return input[indices]
22
+ return torch.gather(
23
+ rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)
24
+ ).reshape(-1, *other_shape)
25
+
26
+ @staticmethod
27
+ def backward(ctx, grad_output):
28
+ (indices,) = ctx.saved_tensors
29
+ assert grad_output.ndim >= 2
30
+ other_shape = grad_output.shape[1:]
31
+ grad_output = rearrange(grad_output, "b ... -> b (...)")
32
+ grad_input = torch.zeros(
33
+ [ctx.first_axis_dim, grad_output.shape[1]],
34
+ device=grad_output.device,
35
+ dtype=grad_output.dtype,
36
+ )
37
+ # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
38
+ # grad_input[indices] = grad_output
39
+ grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
40
+ return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
41
+
42
+
43
+ index_first_axis = IndexFirstAxis.apply
44
+
45
+
46
+ class IndexPutFirstAxis(torch.autograd.Function):
47
+ @staticmethod
48
+ def forward(ctx, values, indices, first_axis_dim):
49
+ ctx.save_for_backward(indices)
50
+ assert indices.ndim == 1
51
+ assert values.ndim >= 2
52
+ output = torch.zeros(
53
+ first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
54
+ )
55
+ # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
56
+ output[indices] = values
57
+ # output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
58
+ return output
59
+
60
+ @staticmethod
61
+ def backward(ctx, grad_output):
62
+ (indices,) = ctx.saved_tensors
63
+ # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
64
+ grad_values = grad_output[indices]
65
+ # grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
66
+ return grad_values, None, None
67
+
68
+
69
+ index_put_first_axis = IndexPutFirstAxis.apply
70
+
71
+
72
+ class IndexFirstAxisResidual(torch.autograd.Function):
73
+ @staticmethod
74
+ def forward(ctx, input, indices):
75
+ ctx.save_for_backward(indices)
76
+ assert input.ndim >= 2
77
+ ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
78
+ second_dim = other_shape.numel()
79
+ # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
80
+ output = input[indices]
81
+ # We don't want to reshape input (b ... -> b (...)) since it could change the channel_last
82
+ # memory format to channel_first. In other words, input might not be contiguous.
83
+ # If we don't detach, PyTorch complains that output is a view being modified in place.
84
+ return output, input.detach()
85
+
86
+ @staticmethod
87
+ def backward(ctx, grad_output, grad_residual):
88
+ (indices,) = ctx.saved_tensors
89
+ assert grad_output.ndim >= 2
90
+ other_shape = grad_output.shape[1:]
91
+ assert grad_residual.shape[1:] == other_shape
92
+ grad_input = grad_residual
93
+ # grad_input[indices] += grad_output
94
+ indices = indices.reshape(indices.shape[0], *((1,) * (grad_output.ndim - 1)))
95
+ indices = indices.expand_as(grad_output)
96
+ grad_input.scatter_add_(0, indices, grad_output)
97
+ return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
98
+
99
+
100
+ index_first_axis_residual = IndexFirstAxisResidual.apply
101
+
102
+
103
+ def unpad_input(hidden_states, attention_mask):
104
+ """
105
+ Arguments:
106
+ hidden_states: (batch, seqlen, ...)
107
+ attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
108
+ Return:
109
+ hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
110
+ indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
111
+ cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
112
+ max_seqlen_in_batch: int
113
+ """
114
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
115
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
116
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
117
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
118
+ # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
119
+ # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
120
+ # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
121
+ # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
122
+ # so we write custom forward and backward to make it a bit faster.
123
+ return (
124
+ index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
125
+ indices,
126
+ cu_seqlens,
127
+ max_seqlen_in_batch,
128
+ )
129
+
130
+
131
+ def unpad_input_for_concatenated_sequences(hidden_states, attention_mask_in_length):
132
+ """
133
+ Supports concatenating short samples into one sequence. The attention_mask_in_length is used to mask the other short samples. This enables efficient training on variable-length samples (e.g., the supervised fine-tuning task for large language models).
134
+ The motivation for this function is explained [here](https://github.com/Dao-AILab/flash-attention/issues/432#issuecomment-1668822286).
135
+
136
+ For example, if batch = 3 and seqlen = 6, the attention_mask_in_length is:
137
+ ```
138
+ [
139
+ [2, 3, 0, 0, 0, 0],
140
+ [3, 2, 0, 0, 0, 0],
141
+ [6, 0, 0, 0, 0, 0]
142
+ ]
143
+ ```
144
+ , which refers to the 3D-attention mask:
145
+ ```
146
+ [
147
+ [
148
+ [1, 0, 0, 0, 0, 0],
149
+ [1, 1, 0, 0, 0, 0],
150
+ [0, 0, 1, 0, 0, 0],
151
+ [0, 0, 1, 1, 0, 0],
152
+ [0, 0, 1, 1, 1, 0],
153
+ [0, 0, 0, 0, 0, 1]
154
+ ],
155
+ [
156
+ [1, 0, 0, 0, 0, 0],
157
+ [1, 1, 0, 0, 0, 0],
158
+ [1, 1, 1, 0, 0, 0],
159
+ [0, 0, 0, 1, 0, 0],
160
+ [0, 0, 0, 1, 1, 0],
161
+ [0, 0, 0, 0, 0, 1]
162
+ ],
163
+ [
164
+ [1, 0, 0, 0, 0, 0],
165
+ [1, 1, 0, 0, 0, 0],
166
+ [1, 1, 1, 0, 0, 0],
167
+ [1, 1, 1, 1, 0, 0],
168
+ [1, 1, 1, 1, 1, 0],
169
+ [1, 1, 1, 1, 1, 1]
170
+ ]
171
+ ]
172
+ ```.
173
+
174
+ Arguments:
175
+ hidden_states: (batch, seqlen, ...)
176
+ attention_mask_in_length: (batch, seqlen), int, a nonzero number (e.g., 1, 2, 3, etc.) means length of concatenated sequence in b-th batch, and 0 means none.
177
+ Return:
178
+ hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
179
+ indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
180
+ cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
181
+ max_seqlen_in_batch: int
182
+ """
183
+ length = attention_mask_in_length.sum(dim=-1)
184
+ seqlen = attention_mask_in_length.size(-1)
185
+ attention_mask_2d = torch.arange(seqlen, device=length.device, dtype=length.dtype).expand(len(length),
186
+ seqlen) < length.unsqueeze(
187
+ 1)
188
+ real_indices_idx = torch.nonzero(attention_mask_in_length.flatten(), as_tuple=False).flatten()
189
+ seqlens_in_batch = attention_mask_in_length.flatten()[real_indices_idx]
190
+ indices = torch.nonzero(attention_mask_2d.flatten(), as_tuple=False).flatten()
191
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
192
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
193
+ # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
194
+ # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
195
+ # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
196
+ # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
197
+ # so we write custom forward and backward to make it a bit faster.
198
+ return (
199
+ index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
200
+ indices,
201
+ cu_seqlens,
202
+ max_seqlen_in_batch,
203
+ )
204
+
205
+
206
+ def pad_input(hidden_states, indices, batch, seqlen):
207
+ """
208
+ Arguments:
209
+ hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
210
+ indices: (total_nnz), the indices that represent the non-masked tokens of the original padded input sequence.
211
+ batch: int, batch size for the padded sequence.
212
+ seqlen: int, maximum sequence length for the padded sequence.
213
+ Return:
214
+ hidden_states: (batch, seqlen, ...)
215
+ """
216
+ dim = hidden_states.shape[-1]
217
+ # output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
218
+ # output[indices] = hidden_states
219
+ output = index_put_first_axis(hidden_states, indices, batch * seqlen)
220
+ return rearrange(output, "(b s) ... -> b s ...", b=batch)
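A round-trip usage sketch for `unpad_input` and `pad_input` (illustrative only, not part of the uploaded file; the import path is hypothetical):

```python
# Sketch: unpad, apply a per-token op, pad back. Shapes follow the docstrings above.
import torch
from bert_padding import unpad_input, pad_input  # hypothetical import path

batch, seqlen, dim = 2, 4, 8
hidden = torch.randn(batch, seqlen, dim)
mask = torch.tensor([[1, 1, 1, 0],
                     [1, 1, 0, 0]])

x, indices, cu_seqlens, max_seqlen = unpad_input(hidden, mask)  # x: (5, 8)
x = x * 2.0                                                     # any token-level operation
restored = pad_input(x, indices, batch, seqlen)                 # (2, 4, 8), zeros at padded positions
```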
block.py ADDED
@@ -0,0 +1,401 @@
1
+ # Copyright (c) 2024, Tri Dao.
2
+
3
+ """"
4
+ The implementation was adapted from
5
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0
6
+ """
7
+
8
+ from functools import partial
9
+ from typing import Optional
10
+
11
+ import torch
12
+ import torch.nn as nn
13
+ from torch import Tensor
14
+ from torchvision.ops import StochasticDepth
15
+
16
+ from .mha import MHA
17
+ from .mlp import Mlp
18
+
19
+ try:
20
+ from flash_attn.ops.triton.layer_norm import layer_norm_fn, RMSNorm
21
+ except ImportError:
22
+ layer_norm_fn, RMSNorm = None, None
23
+
24
+
25
+ class Block(nn.Module):
26
+ def __init__(
27
+ self,
28
+ dim,
29
+ mixer_cls=None,
30
+ mlp_cls=None,
31
+ norm_cls=nn.LayerNorm,
32
+ dropout_cls=nn.Dropout,
33
+ prenorm=True,
34
+ resid_dropout1=0.0,
35
+ resid_dropout2=0.0,
36
+ drop_path1=0.0,
37
+ drop_path2=0.0,
38
+ fused_dropout_add_ln=False,
39
+ return_residual=False,
40
+ residual_in_fp32=False,
41
+ sequence_parallel=False,
42
+ mark_shared_params=False,
43
+ ):
44
+ """
45
+ For prenorm=True, this Block has a slightly different structure compared to a regular
46
+ prenorm Transformer block.
47
+ The standard block is: LN -> MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add.
48
+ [Ref: https://arxiv.org/abs/2002.04745]
49
+ Here we have: Dropout -> Add -> LN -> MHA -> Dropout -> Add -> LN -> MLP, returning both
50
+ the hidden_states (output of the MLP) and the residual.
51
+ This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
52
+ The residual needs to be provided (except for the very first block).
53
+
54
+ For prenorm=False, this Block has the same structure as a regular postnorm Transformer
55
+ block: MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add -> LN.
56
+
57
+ return_residual: whether each of the sub-layers (mixer and mlp) will return the residual.
58
+ This is for performance reason: for post-norm architecture, returning the input allows us
59
+ to fuse the backward of nn.Linear with the residual connection.
60
+ """
61
+ super().__init__()
62
+ self.prenorm = prenorm
63
+ self.fused_dropout_add_ln = fused_dropout_add_ln
64
+ self.return_residual = return_residual
65
+ self.residual_in_fp32 = residual_in_fp32
66
+ if self.residual_in_fp32:
67
+ assert self.prenorm, "residual_in_fp32 is only compatible with prenorm=True"
68
+ if mixer_cls is None:
69
+ mixer_cls = partial(MHA, num_heads=dim // 64)
70
+ if mlp_cls is None:
71
+ mlp_cls = partial(Mlp, hidden_features=4 * dim)
72
+ self.mixer = mixer_cls(dim)
73
+ self.dropout1 = dropout_cls(resid_dropout1)
74
+ self.drop_path1 = StochasticDepth(drop_path1, mode="row")
75
+ self.norm1 = norm_cls(dim)
76
+ self.mlp = mlp_cls(dim)
77
+ if not isinstance(self.mlp, nn.Identity):
78
+ self.dropout2 = dropout_cls(resid_dropout2)
79
+ self.drop_path2 = StochasticDepth(drop_path2, mode="row")
80
+ self.norm2 = norm_cls(dim)
81
+
82
+ if self.fused_dropout_add_ln:
83
+ assert layer_norm_fn is not None, "Triton is not installed"
84
+ assert isinstance(self.norm1, (nn.LayerNorm, RMSNorm)) and isinstance(
85
+ self.dropout1, nn.Dropout
86
+ )
87
+
88
+ # TD [2023-01-07]: TODO: During training, if sequence_parallel is False and dropout != 0.0,
89
+ # then the input to each worker in the tensor parallel group will be different.
90
+ # This would produce wrong outputs? Somehow we'd need to sync the RNG state across workers.
91
+ # For now this is not an issue because we always use sequence_parallel=True during training
92
+ # and only use sequence_parallel=False during inference.
93
+
94
+ # Mark the norm parameters as "sequence_parallel" so that we run all-reduce on their grads.
95
+ if sequence_parallel:
96
+ for p in self.norm1.parameters():
97
+ p._sequence_parallel = True
98
+ if hasattr(self, "norm2"):
99
+ for p in self.norm2.parameters():
100
+ p._sequence_parallel = True
101
+ # Mark the norm parameters as "shared_params" so that we sync their values at init.
102
+ if mark_shared_params:
103
+ for p in self.norm1.parameters():
104
+ p._shared_params = True
105
+ if hasattr(self, "norm2"):
106
+ for p in self.norm2.parameters():
107
+ p._shared_params = True
108
+
109
+ def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
110
+ return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)
111
+
112
+ def forward(
113
+ self,
114
+ hidden_states: Tensor,
115
+ residual: Optional[Tensor] = None,
116
+ mixer_subset=None,
117
+ mixer_kwargs=None,
118
+ ):
119
+ r"""Pass the input through the encoder layer.
120
+
121
+ Args:
122
+ hidden_states: the sequence to the encoder layer (required).
123
+ residual: if postnorm, residual=None. If prenorm, hidden_states = Attn/MLP(LN(residual)).
124
+ mixer_subset: for cross-attention only. If not None, will take a subset of x
125
+ before applying the query projection. Useful for e.g., ViT where we only care
126
+ about the CLS token in the last layer.
127
+ """
128
+ if self.prenorm:
129
+ if not self.fused_dropout_add_ln:
130
+ dropped = self.drop_path1(self.dropout1(hidden_states))
131
+ residual = (dropped + residual) if residual is not None else dropped
132
+ hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
133
+ if self.residual_in_fp32:
134
+ residual = residual.to(torch.float32)
135
+ else:
136
+ if self.drop_path1.p == 0 or not self.training:
137
+ rowscale1 = None
138
+ else:
139
+ rowscale1 = self.drop_path1(
140
+ torch.ones(
141
+ hidden_states.shape[:-1],
142
+ device=hidden_states.device,
143
+ dtype=hidden_states.dtype,
144
+ )
145
+ )
146
+ hidden_states, residual = layer_norm_fn(
147
+ hidden_states,
148
+ self.norm1.weight,
149
+ self.norm1.bias,
150
+ residual=residual,
151
+ eps=self.norm1.eps,
152
+ dropout_p=self.dropout1.p if self.training else 0.0,
153
+ rowscale=rowscale1,
154
+ prenorm=True,
155
+ residual_in_fp32=self.residual_in_fp32,
156
+ is_rms_norm=isinstance(self.norm1, RMSNorm)
157
+ )
158
+ if mixer_kwargs is None:
159
+ mixer_kwargs = {}
160
+ if mixer_subset is not None:
161
+ mixer_kwargs["mixer_subset"] = mixer_subset
162
+ hidden_states = self.mixer(hidden_states, **mixer_kwargs)
163
+ if mixer_subset is not None:
164
+ residual = residual[:, mixer_subset]
165
+ if not isinstance(self.mlp, nn.Identity):
166
+ if not self.fused_dropout_add_ln:
167
+ dropped = self.drop_path2(self.dropout2(hidden_states))
168
+ residual = (dropped + residual) if residual is not None else dropped
169
+ hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
170
+ if self.residual_in_fp32:
171
+ residual = residual.to(torch.float32)
172
+ else:
173
+ if self.drop_path2.p == 0 or not self.training:
174
+ rowscale2 = None
175
+ else:
176
+ rowscale2 = self.drop_path2(
177
+ torch.ones(
178
+ hidden_states.shape[:-1],
179
+ device=hidden_states.device,
180
+ dtype=hidden_states.dtype,
181
+ )
182
+ )
183
+ hidden_states, residual = layer_norm_fn(
184
+ hidden_states,
185
+ self.norm2.weight,
186
+ self.norm2.bias,
187
+ residual=residual,
188
+ eps=self.norm2.eps,
189
+ dropout_p=self.dropout2.p if self.training else 0.0,
190
+ rowscale=rowscale2,
191
+ prenorm=True,
192
+ residual_in_fp32=self.residual_in_fp32,
193
+ is_rms_norm=isinstance(self.norm2, RMSNorm)
194
+ )
195
+ hidden_states = self.mlp(hidden_states)
196
+ return hidden_states, residual
197
+ else:
198
+ assert residual is None
199
+ mixer_out = self.mixer(
200
+ hidden_states, **(mixer_kwargs if mixer_kwargs is not None else {})
201
+ )
202
+ if self.return_residual: # mixer out is actually a pair here
203
+ mixer_out, hidden_states = mixer_out
204
+ if not self.fused_dropout_add_ln:
205
+ hidden_states = self.norm1(
206
+ (self.drop_path1(self.dropout1(mixer_out)) + hidden_states).to(
207
+ dtype=self.norm1.weight.dtype
208
+ )
209
+ )
210
+ else:
211
+ if self.drop_path1.p == 0 or not self.training:
212
+ rowscale1 = None
213
+ else:
214
+ rowscale1 = self.drop_path1(
215
+ torch.ones(
216
+ mixer_out.shape[:-1], device=mixer_out.device, dtype=mixer_out.dtype
217
+ )
218
+ )
219
+ hidden_states = layer_norm_fn(
220
+ mixer_out,
221
+ self.norm1.weight,
222
+ self.norm1.bias,
223
+ residual=hidden_states,
224
+ eps=self.norm1.eps,
225
+ dropout_p=self.dropout1.p if self.training else 0.0,
226
+ rowscale=rowscale1,
227
+ prenorm=False,
228
+ is_rms_norm=isinstance(self.norm1, RMSNorm)
229
+ )
230
+ if not isinstance(self.mlp, nn.Identity):
231
+ mlp_out = self.mlp(hidden_states)
232
+ if self.return_residual: # mlp out is actually a pair here
233
+ mlp_out, hidden_states = mlp_out
234
+ if not self.fused_dropout_add_ln:
235
+ hidden_states = self.norm2(
236
+ (self.drop_path2(self.dropout2(mlp_out)) + hidden_states).to(
237
+ dtype=self.norm2.weight.dtype
238
+ )
239
+ )
240
+ else:
241
+ if self.drop_path2.p == 0 or not self.training:
242
+ rowscale2 = None
243
+ else:
244
+ rowscale2 = self.drop_path2(
245
+ torch.ones(
246
+ mlp_out.shape[:-1], device=mlp_out.device, dtype=mlp_out.dtype
247
+ )
248
+ )
249
+ hidden_states = layer_norm_fn(
250
+ mlp_out,
251
+ self.norm2.weight,
252
+ self.norm2.bias,
253
+ residual=hidden_states,
254
+ eps=self.norm2.eps,
255
+ dropout_p=self.dropout2.p if self.training else 0.0,
256
+ rowscale=rowscale2,
257
+ prenorm=False,
258
+ is_rms_norm=isinstance(self.norm2, RMSNorm)
259
+ )
260
+ return hidden_states
261
+
262
+
263
+ class ParallelBlock(nn.Module):
264
+ """The attention (mixer) and MLP blocks are done in parallel, similar to GPT-J, GPT-NeoX,
265
+ and PaLM.
266
+ """
267
+
268
+ def __init__(
269
+ self,
270
+ dim,
271
+ mixer_cls=None,
272
+ mlp_cls=None,
273
+ norm_cls=nn.LayerNorm,
274
+ dropout_cls=nn.Dropout,
275
+ resid_dropout1=0.0,
276
+ resid_dropout2=0.0,
277
+ tied_norm=False,
278
+ fused_dropout_add_ln=False,
279
+ residual_in_fp32=False,
280
+ sequence_parallel=False,
281
+ mark_shared_params=False,
282
+ ):
283
+ """
284
+ This Block has a slightly different structure compared to a regular
285
+ prenorm Transformer block.
286
+ The standard block is: LN -> MHA / MLP -> Dropout -> Add.
287
+ [Ref: https://arxiv.org/abs/2002.04745]
288
+ Here we have: Dropout -> Add -> LN -> MHA / MLP, returning both
289
+ the hidden_states (output1 of the MHA / MLP) and the residual.
290
+ This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
291
+ The residual needs to be provided (except for the very first block).
292
+ """
293
+ super().__init__()
294
+ self.tied_norm = tied_norm
295
+ self.fused_dropout_add_ln = fused_dropout_add_ln
296
+ self.residual_in_fp32 = residual_in_fp32
297
+ if mixer_cls is None:
298
+ mixer_cls = partial(MHA, num_heads=dim // 64)
299
+ if mlp_cls is None:
300
+ mlp_cls = partial(Mlp, hidden_features=4 * dim)
301
+ self.mixer = mixer_cls(dim)
302
+ self.dropout1 = dropout_cls(resid_dropout1)
303
+ self.norm1 = norm_cls(dim)
304
+ self.mlp = mlp_cls(dim)
305
+ self.dropout2 = dropout_cls(resid_dropout2)
306
+ if not self.tied_norm:
307
+ self.norm2 = norm_cls(dim)
308
+
309
+ if self.fused_dropout_add_ln:
310
+ assert layer_norm_fn is not None, "Triton is not installed"
311
+ assert isinstance(self.norm1, (nn.LayerNorm, RMSNorm)) and isinstance(
312
+ self.dropout1, nn.Dropout
313
+ )
314
+
315
+ # TD [2023-01-07]: TODO: During training, if sequence_parallel is False and dropout != 0.0,
316
+ # then the input to each worker in the tensor parallel group will be different.
317
+ # This would produce wrong outputs? Somehow we'd need to sync the RNG state across workers.
318
+ # For now this is not an issue because we always use sequence_parallel=True during training
319
+ # and only use sequence_parallel=False during inference.
320
+
321
+ # Mark the norm parameters as "sequence_parallel" so that we run all-reduce on their grads.
322
+ if sequence_parallel:
323
+ for p in self.norm1.parameters():
324
+ p._sequence_parallel = True
325
+ if hasattr(self, "norm2"):
326
+ for p in self.norm2.parameters():
327
+ p._sequence_parallel = True
328
+ # Mark the norm parameters as "shared_params" so that we sync their values at init.
329
+ if mark_shared_params:
330
+ for p in self.norm1.parameters():
331
+ p._shared_params = True
332
+ if hasattr(self, "norm2"):
333
+ for p in self.norm2.parameters():
334
+ p._shared_params = True
335
+
336
+ def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None, **kwargs):
337
+ return self.mixer.allocate_inference_cache(batch_size, max_seqlen, dtype=dtype, **kwargs)
338
+
339
+ def forward(
340
+ self,
341
+ hidden_states1: Tensor,
342
+ hidden_states2: Optional[Tensor] = None,
343
+ residual: Optional[Tensor] = None,
344
+ mixer_kwargs=None,
345
+ ):
346
+ r"""Pass the input through the encoder layer.
347
+
348
+ Args:
349
+ hidden_states1: the output of the previous attention (mixer) or embedding layer.
350
+ hidden_states2: the output of the previous MLP layer (if None, will use hidden_states1).
351
+ residual.
352
+ """
353
+ # TODO: Ideally we should only do the allgather / allreduce once for
354
+ # the Linear to MLP & Attention
355
+ if not self.fused_dropout_add_ln:
356
+ dropped1 = self.dropout1(hidden_states1)
357
+ # For the very 1st block, we only want 1 dropout, not two different dropouts
358
+ if hidden_states2 is not None:
359
+ dropped2 = self.dropout2(hidden_states2)
360
+ residual = (
361
+ (residual + dropped1 + dropped2)
362
+ if residual is not None
363
+ else dropped1 + dropped2
364
+ )
365
+ else:
366
+ residual = (residual + dropped1) if residual is not None else dropped1
367
+ hidden_states1 = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
368
+ hidden_states2 = (
369
+ self.norm2(residual.to(dtype=self.norm2.weight.dtype))
370
+ if not self.tied_norm
371
+ else hidden_states1
372
+ )
373
+ if self.residual_in_fp32:
374
+ residual = residual.to(torch.float32)
375
+ else:
376
+ weight2, bias2 = (
377
+ (self.norm2.weight, self.norm2.bias) if not self.tied_norm else (None, None)
378
+ )
379
+ hidden_states1, *rest, residual = layer_norm_fn(
380
+ hidden_states1,
381
+ self.norm1.weight,
382
+ self.norm1.bias,
383
+ residual=residual,
384
+ x1=hidden_states2,
385
+ weight1=weight2,
386
+ bias1=bias2,
387
+ eps=self.norm1.eps,
388
+ dropout_p=self.dropout1.p if self.training else 0.0,
389
+ prenorm=True,
390
+ residual_in_fp32=self.residual_in_fp32,
391
+ is_rms_norm=isinstance(self.norm1, RMSNorm)
392
+ )
393
+ if self.tied_norm:
394
+ hidden_states2 = hidden_states1
395
+ else:
396
+ hidden_states2, = rest
397
+ if mixer_kwargs is None:
398
+ mixer_kwargs = {}
399
+ hidden_states1 = self.mixer(hidden_states1, **mixer_kwargs)
400
+ hidden_states2 = self.mlp(hidden_states2)
401
+ return hidden_states1, hidden_states2, residual
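A construction sketch for `Block` with its defaults (assumes the sibling `mha.py`/`mlp.py` modules and their dependencies, e.g. `torchvision`, are importable; not part of the uploaded file):

```python
# Sketch: a prenorm Block with the default MHA mixer (dim // 64 heads) and a 4*dim MLP.
import torch
from block import Block  # hypothetical import path

block = Block(dim=768, prenorm=True)
x = torch.randn(2, 16, 768)
hidden_states, residual = block(x)  # prenorm blocks return (hidden_states, residual)
```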
config.json ADDED
@@ -0,0 +1,40 @@
1
+ {
2
+ "_name_or_path": "jinaai/jina-bert-flash-implementation",
3
+ "auto_map": {
4
+ "AutoConfig": "jinaai/jina-bert-flash-implementation--configuration_bert.JinaBertConfig",
5
+ "AutoModel": "jinaai/jina-bert-flash-implementation--modeling_bert.BertModel",
6
+ "AutoModelForPreTraining": "jinaai/jina-bert-flash-implementation--modeling_bert.BertForPreTraining",
7
+ "AutoModelForMaskedLM": "jinaai/jina-bert-flash-implementation--modeling_bert.BertForPreTraining"
8
+ },
9
+ "attention_probs_dropout_prob": 0.1,
10
+ "classifier_dropout": null,
11
+ "dense_seq_output": false,
12
+ "emb_pooler": null,
13
+ "fused_bias_fc": false,
14
+ "fused_dropout_add_ln": false,
15
+ "hidden_act": "gelu",
16
+ "hidden_dropout_prob": 0.1,
17
+ "hidden_size": 768,
18
+ "initializer_range": 0.02,
19
+ "intermediate_size": 3072,
20
+ "last_layer_subset": false,
21
+ "layer_norm_eps": 1e-12,
22
+ "mlp_checkpoint_lvl": 0,
23
+ "mlp_type": "glu",
24
+ "model_type": "bert",
25
+ "num_attention_heads": 12,
26
+ "num_hidden_layers": 12,
27
+ "num_loras": 5,
28
+ "pad_token_id": 0,
29
+ "pad_vocab_size_multiple": 1,
30
+ "torch_dtype": "float16",
31
+ "transformers_version": "4.39.3",
32
+ "type_vocab_size": 2,
33
+ "use_flash_attn": null,
34
+ "use_qk_norm": false,
35
+ "vocab_size": 30528,
36
+ "window_size": [
37
+ -1,
38
+ -1
39
+ ]
40
+ }
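The `auto_map` entries above let `transformers` resolve the custom classes from the referenced repository. A loading sketch (assumes network access and that remote code is trusted):

```python
# Sketch: resolve this configuration through the auto_map above.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("jinaai/jina-bert-flash-implementation", trust_remote_code=True)
print(config.window_size, config.use_flash_attn, config.mlp_type)
```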
configuration_bert.py ADDED
@@ -0,0 +1,121 @@
1
+ # coding=utf-8
2
+ # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
3
+ # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+ """ BERT model configuration"""
17
+
18
+ from transformers import PretrainedConfig
19
+
20
+
21
+ class JinaBertConfig(PretrainedConfig):
22
+ r"""
23
+ This is the configuration class to store the configuration of a [`BertModel`] or a [`TFBertModel`]. It is used to
24
+ instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a
25
+ configuration with the defaults will yield a similar configuration to that of the BERT
26
+ [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased) architecture.
27
+
28
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
29
+ documentation from [`PretrainedConfig`] for more information.
30
+
31
+
32
+ Args:
33
+ vocab_size (`int`, *optional*, defaults to 30522):
34
+ Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the
35
+ `inputs_ids` passed when calling [`BertModel`] or [`TFBertModel`].
36
+ hidden_size (`int`, *optional*, defaults to 768):
37
+ Dimensionality of the encoder layers and the pooler layer.
38
+ num_hidden_layers (`int`, *optional*, defaults to 12):
39
+ Number of hidden layers in the Transformer encoder.
40
+ num_attention_heads (`int`, *optional*, defaults to 12):
41
+ Number of attention heads for each attention layer in the Transformer encoder.
42
+ intermediate_size (`int`, *optional*, defaults to 3072):
43
+ Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
44
+ hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`):
45
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
46
+ `"relu"`, `"silu"` and `"gelu_new"` are supported.
47
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.1):
48
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
49
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1):
50
+ The dropout ratio for the attention probabilities.
51
+ type_vocab_size (`int`, *optional*, defaults to 2):
52
+ The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`].
53
+ initializer_range (`float`, *optional*, defaults to 0.02):
54
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
55
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
56
+ The epsilon used by the layer normalization layers.
57
+ window_size (`tuple`, *optional*, defaults to `(-1, -1)`): If not the default, use local attention
58
+ """
59
+
60
+ model_type = "bert"
61
+
62
+ def __init__(
63
+ self,
64
+ vocab_size=30522,
65
+ hidden_size=768,
66
+ num_hidden_layers=12,
67
+ num_attention_heads=12,
68
+ intermediate_size=3072,
69
+ hidden_act="gelu",
70
+ hidden_dropout_prob=0.1,
71
+ attention_probs_dropout_prob=0.1,
72
+ type_vocab_size=2,
73
+ initializer_range=0.02,
74
+ layer_norm_eps=1e-12,
75
+ pad_token_id=0,
76
+ window_size=(-1, -1),
77
+ dense_seq_output=False,
78
+ mlp_type='mlp',
79
+ mlp_checkpoint_lvl=0,
80
+ last_layer_subset=False,
81
+ fused_dropout_add_ln=False,
82
+ fused_bias_fc=False,
83
+ pad_vocab_size_multiple=1,
84
+ use_flash_attn=True,
85
+ use_qk_norm=True,
86
+ emb_pooler=None,
87
+ classifier_dropout=None,
88
+ num_loras=5,
89
+ **kwargs,
90
+ ):
91
+ assert 'position_embedding_type' not in kwargs
92
+ assert 'max_position_embeddings' not in kwargs
93
+ super().__init__(pad_token_id=pad_token_id, **kwargs)
94
+
95
+ if mlp_type == 'fused_mlp' and hidden_act not in ["gelu_new", "gelu_fast", "gelu_pytorch_tanh"]:
96
+ raise ValueError('Fused MLP only supports approximate gelu')
97
+
98
+ self.vocab_size = vocab_size
99
+ self.hidden_size = hidden_size
100
+ self.num_hidden_layers = num_hidden_layers
101
+ self.num_attention_heads = num_attention_heads
102
+ self.hidden_act = hidden_act
103
+ self.intermediate_size = intermediate_size
104
+ self.hidden_dropout_prob = hidden_dropout_prob
105
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
106
+ self.type_vocab_size = type_vocab_size
107
+ self.initializer_range = initializer_range
108
+ self.layer_norm_eps = layer_norm_eps
109
+ self.window_size = window_size
110
+ self.dense_seq_output = dense_seq_output
111
+ self.mlp_type = mlp_type
112
+ self.mlp_checkpoint_lvl = mlp_checkpoint_lvl
113
+ self.last_layer_subset = last_layer_subset
114
+ self.fused_dropout_add_ln = fused_dropout_add_ln
115
+ self.fused_bias_fc = fused_bias_fc
116
+ self.pad_vocab_size_multiple = pad_vocab_size_multiple
117
+ self.use_flash_attn = use_flash_attn
118
+ self.use_qk_norm = use_qk_norm
119
+ self.emb_pooler = emb_pooler
120
+ self.classifier_dropout = classifier_dropout
121
+ self.num_loras = num_loras
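An instantiation sketch for `JinaBertConfig` (the arguments mirror the conversion script in `convert_v2_weights.py` below; the import path is hypothetical):

```python
# Sketch: build the config directly with the same arguments used by convert_v2_weights.py.
from configuration_bert import JinaBertConfig  # hypothetical import path

config = JinaBertConfig(vocab_size=30528, use_qk_norm=False, mlp_type='glu', hidden_act='gelu')
print(config.to_json_string())
```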
convert_v2_weights.py ADDED
@@ -0,0 +1,151 @@
1
+ import re
2
+ from collections import OrderedDict
3
+ from transformers import AutoModel, AutoTokenizer
4
+ from .configuration_bert import JinaBertConfig
5
+ import torch
+ import torch.nn.functional as F
6
+ from .modeling_bert import BertModel
7
+
8
+ def remap_state_dict(state_dict, config: JinaBertConfig):
9
+ """
10
+ Map the state_dict of a Huggingface BERT model to be flash_attn compatible.
11
+ """
12
+
13
+ # LayerNorm
14
+ def key_mapping_ln_gamma_beta(key):
15
+ key = re.sub(r"LayerNorm.gamma$", "LayerNorm.weight", key)
16
+ key = re.sub(r"LayerNorm.beta$", "LayerNorm.bias", key)
17
+ return key
18
+
19
+ state_dict = OrderedDict((key_mapping_ln_gamma_beta(k), v) for k, v in state_dict.items())
20
+
21
+ # Layers
22
+ def key_mapping_layers(key):
23
+ return re.sub(r"^encoder.layer.", "encoder.layers.", key)
24
+
25
+ state_dict = OrderedDict((key_mapping_layers(k), v) for k, v in state_dict.items())
26
+
27
+ # LayerNorm
28
+ def key_mapping_ln(key):
29
+ key = re.sub(r"^embeddings.LayerNorm.", "emb_ln.", key)
30
+ key = re.sub(
31
+ r"^encoder.layers.(\d+).attention.output.LayerNorm.(weight|bias)",
32
+ r"encoder.layers.\1.norm1.\2",
33
+ key,
34
+ )
35
+ key = re.sub(
36
+ r"^encoder.layers.(\d+).output.LayerNorm.(weight|bias)",
37
+ r"encoder.layers.\1.norm2.\2",
38
+ key,
39
+ )
40
+ key = re.sub(
41
+ r"^cls.predictions.transform.LayerNorm.(weight|bias)",
42
+ r"cls.predictions.transform.layer_norm.\1",
43
+ key,
44
+ )
45
+ return key
46
+
47
+ state_dict = OrderedDict((key_mapping_ln(k), v) for k, v in state_dict.items())
48
+
49
+ # MLP
50
+ def key_mapping_mlp(key):
51
+ key = re.sub(
52
+ r"^encoder.layers.(\d+).intermediate.dense.(weight|bias)",
53
+ r"encoder.layers.\1.mlp.fc1.\2",
54
+ key,
55
+ )
56
+ key = re.sub(
57
+ r"^encoder.layers.(\d+).output.dense.(weight|bias)",
58
+ r"encoder.layers.\1.mlp.fc2.\2",
59
+ key,
60
+ )
61
+ return key
62
+
63
+ state_dict = OrderedDict((key_mapping_mlp(k), v) for k, v in state_dict.items())
64
+
65
+ # Attention
66
+ last_layer_subset = getattr(config, "last_layer_subset", False)
67
+ for d in range(config.num_hidden_layers):
68
+ Wq = state_dict.pop(f"encoder.layers.{d}.attention.self.query.weight")
69
+ Wk = state_dict.pop(f"encoder.layers.{d}.attention.self.key.weight")
70
+ Wv = state_dict.pop(f"encoder.layers.{d}.attention.self.value.weight")
71
+ bq = state_dict.pop(f"encoder.layers.{d}.attention.self.query.bias")
72
+ bk = state_dict.pop(f"encoder.layers.{d}.attention.self.key.bias")
73
+ bv = state_dict.pop(f"encoder.layers.{d}.attention.self.value.bias")
74
+ if not (last_layer_subset and d == config.num_hidden_layers - 1):
75
+ state_dict[f"encoder.layers.{d}.mixer.Wqkv.weight"] = torch.cat(
76
+ [Wq, Wk, Wv], dim=0
77
+ )
78
+ state_dict[f"encoder.layers.{d}.mixer.Wqkv.bias"] = torch.cat([bq, bk, bv], dim=0)
79
+ else:
80
+ state_dict[f"encoder.layers.{d}.mixer.Wq.weight"] = Wq
81
+ state_dict[f"encoder.layers.{d}.mixer.Wkv.weight"] = torch.cat([Wk, Wv], dim=0)
82
+ state_dict[f"encoder.layers.{d}.mixer.Wq.bias"] = bq
83
+ state_dict[f"encoder.layers.{d}.mixer.Wkv.bias"] = torch.cat([bk, bv], dim=0)
84
+
85
+ def key_mapping_attn(key):
86
+ return re.sub(
87
+ r"^encoder.layers.(\d+).attention.output.dense.(weight|bias)",
88
+ r"encoder.layers.\1.mixer.out_proj.\2",
89
+ key,
90
+ )
91
+
92
+ state_dict = OrderedDict((key_mapping_attn(k), v) for k, v in state_dict.items())
93
+
94
+ def key_mapping_decoder_bias(key):
95
+ return re.sub(r"^cls.predictions.bias", "cls.predictions.decoder.bias", key)
96
+
97
+ state_dict = OrderedDict((key_mapping_decoder_bias(k), v) for k, v in state_dict.items())
98
+
99
+ # Word embedding
100
+ pad_vocab_size_multiple = getattr(config, "pad_vocab_size_multiple", 1)
101
+ if pad_vocab_size_multiple > 1:
102
+ word_embeddings = state_dict["embeddings.word_embeddings.weight"]
103
+ state_dict["embeddings.word_embeddings.weight"] = F.pad(
104
+ word_embeddings, (0, 0, 0, config.vocab_size - word_embeddings.shape[0])
105
+ )
106
+ decoder_weight = state_dict["cls.predictions.decoder.weight"]
107
+ state_dict["cls.predictions.decoder.weight"] = F.pad(
108
+ decoder_weight, (0, 0, 0, config.vocab_size - decoder_weight.shape[0])
109
+ )
110
+ # If the vocab was padded, we want to set the decoder bias for those padded indices to be
111
+ # strongly negative (i.e. the decoder shouldn't predict those indices).
112
+ # TD [2022-05-09]: I don't think it affects the MLPerf training.
113
+ decoder_bias = state_dict["cls.predictions.decoder.bias"]
114
+ state_dict["cls.predictions.decoder.bias"] = F.pad(
115
+ decoder_bias, (0, config.vocab_size - decoder_bias.shape[0]), value=-100.0
116
+ )
117
+
118
+ # LayerNorm
119
+ def key_mapping_layernorm(key):
120
+ return re.sub(r'^encoder.layers.(\d+).mlp.layernorm.(weight|bias)', r"encoder.layers.\1.norm2.\2", key)
121
+
122
+ state_dict = OrderedDict((key_mapping_layernorm(k), v) for k, v in state_dict.items())
123
+
124
+ return state_dict
125
+
126
+
127
+ v2_model = AutoModel.from_pretrained('jinaai/jina-embeddings-v2-base-en', trust_remote_code=True)
128
+ config = JinaBertConfig(vocab_size=30528, use_qk_norm=False, mlp_type='glu', hidden_act='gelu')
129
+ state_dict = v2_model.state_dict()
130
+ new_state_dict = remap_state_dict(state_dict, config)
131
+ flash_model = BertModel(config)
132
+ flash_model.load_state_dict(new_state_dict)
133
+
134
+
135
+ torch.save(new_state_dict, 'converted_weights.bin')
136
+ print(config.to_json_string())
137
+
138
+
139
+ """
140
+ tokenizer = AutoTokenizer.from_pretrained('jinaai/jina-embeddings-v2-base-en')
141
+ inp = tokenizer.batch_encode_plus(['Hello world', 'How is the weather today?', 'It is raining a lot in Berlin'], return_tensors='pt', padding=True).to('cuda')
142
+ v2_model.eval()
143
+ flash_model.eval()
144
+ v2_model = v2_model.to('cuda', torch.float16)
145
+ flash_model = flash_model.to('cuda', torch.float16)
146
+ output_v2 = v2_model(**inp)
147
+ output_flash = flash_model(**inp)
148
+ x = output_v2.last_hidden_state
149
+ y = output_flash.last_hidden_state
150
+ print(torch.abs(x - y))
151
+ """
embedding.py ADDED
@@ -0,0 +1,60 @@
1
+ # Copyright (c) 2022, Tri Dao.
2
+
3
+ """"
4
+ The implementation was adapted from
5
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0/flash_attn/models/bert.py
6
+ """
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ from torch import Tensor
11
+
12
+
13
+ class BertEmbeddings(nn.Module):
14
+ def __init__(
15
+ self,
16
+ embed_dim,
17
+ vocab_size,
18
+ max_position_embeddings,
19
+ type_vocab_size,
20
+ padding_idx=None,
21
+ device=None,
22
+ dtype=None,
23
+ ):
24
+ """
25
+ If max_position_embeddings <= 0, there's no position embeddings
26
+ If type_vocab_size <= 0, there's no token type embeddings
27
+ """
28
+ factory_kwargs = {"device": device, "dtype": dtype}
29
+ super().__init__()
30
+ self.word_embeddings = nn.Embedding(
31
+ vocab_size, embed_dim, padding_idx=padding_idx, **factory_kwargs
32
+ )
33
+ self.max_position_embeddings = max_position_embeddings
34
+ self.type_vocab_size = type_vocab_size
35
+ if self.max_position_embeddings > 0:
36
+ self.position_embeddings = nn.Embedding(
37
+ max_position_embeddings, embed_dim, **factory_kwargs
38
+ )
39
+ if self.type_vocab_size > 0:
40
+ self.token_type_embeddings = nn.Embedding(type_vocab_size, embed_dim, **factory_kwargs)
41
+
42
+ def forward(self, input_ids, position_ids=None, token_type_ids=None):
43
+ """
44
+ input_ids: (batch, seqlen)
45
+ position_ids: (batch, seqlen)
46
+ token_type_ids: (batch, seqlen)
47
+ """
48
+ batch_size, seqlen = input_ids.shape
49
+ embeddings = self.word_embeddings(input_ids)
50
+ if self.max_position_embeddings > 0:
51
+ if position_ids is None:
52
+ position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
53
+ position_embeddings = self.position_embeddings(position_ids)
54
+ embeddings = embeddings + position_embeddings
55
+ if self.type_vocab_size > 0:
56
+ if token_type_ids is None:
57
+ token_type_ids = torch.zeros(seqlen, dtype=torch.long, device=input_ids.device)
58
+ token_type_embeddings = self.token_type_embeddings(token_type_ids)
59
+ embeddings = embeddings + token_type_embeddings
60
+ return embeddings
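A shape sketch for `BertEmbeddings` in the ALiBi-style setup without position embeddings (vocabulary size matches `config.json` in this commit; the import path is hypothetical):

```python
# Sketch: max_position_embeddings <= 0 disables position embeddings (positions are handled in attention).
import torch
from embedding import BertEmbeddings  # hypothetical import path

emb = BertEmbeddings(embed_dim=768, vocab_size=30528, max_position_embeddings=0, type_vocab_size=2)
input_ids = torch.randint(0, 30528, (2, 16))
out = emb(input_ids)  # (2, 16, 768); token type ids default to zeros
```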
jina_clip_handler.py ADDED
@@ -0,0 +1,118 @@
1
+
2
+ from ts.torch_handler.base_handler import BaseHandler
3
+ from transformers import AutoModel, AutoProcessor, AutoTokenizer
4
+ import torch
5
+ from PIL import Image
6
+ import requests
7
+ from io import BytesIO
8
+
9
+ import logging
10
+ import os
11
+
12
+ import transformers
13
+ from jina_clip_implementation import modeling_clip, configuration_clip
14
+
15
+ import numpy as np
16
+ from time import time
17
+
18
19
+
20
+ logger = logging.getLogger(__name__)
21
+ logger.info("Transformers version %s", transformers.__version__)
22
+
23
+ class JinaClipHandler(BaseHandler):
24
+ """
25
+ A custom model handler implementation.
26
+ """
27
+
28
+ def __init__(self):
29
+ super(JinaClipHandler, self).__init__()
30
+ self.initialized = False
31
+
32
+ def initialize(self, ctx):
33
+ """ Loads the model.pt file and initializes the model object.
34
+ Instantiates Tokenizer for preprocessor to use
35
+ Loads labels to name mapping file for post-processing inference response
36
+ """
37
+ self.manifest = ctx.manifest
38
+ logger.info("ctx manifest: " + str(self.manifest))
39
+
40
+ properties = ctx.system_properties
41
+ logger.info("ctx properties: " + str(properties))
42
+ model_dir = properties.get("model_dir")
43
+ self.device = torch.device("cuda:" + str(properties.get("gpu_id")) if torch.cuda.is_available() else "cpu")
44
+
45
+
46
+ # Read model serialize/pt file
47
+ serialized_file = self.manifest["model"]["serializedFile"]
48
+ model_pt_path = os.path.join(model_dir, serialized_file)
49
+ if not os.path.isfile(model_pt_path):
50
+ raise RuntimeError("Missing the model.pt or pytorch_model.bin file")
51
+
52
+ # Load model from config.json path
53
+ # self.tokenizer = AutoTokenizer.from_pretrained(model_dir, local_files_only=True)
54
+ # self.model = AutoModel.from_pretrained(model_dir, local_files_only=True)
55
+ self.model_config = configuration_clip.JinaCLIPConfig()
56
+ self.model = modeling_clip.JinaCLIPModel(self.model_config)
57
+ self.model = torch.load(model_pt_path)
58
+ self.model.to(self.device)
59
+ self.model.eval()
60
+ logger.debug('Transformer model from path {0} loaded successfully'.format(model_pt_path))
61
+
62
+ self.initialized = True
63
+
64
+ def preprocess(self, data):
65
+ data = data[0]
66
+ texts = data.get("texts", [])
67
+ texts = [texts] if isinstance(texts, str) else texts
68
+ image_urls = data.get("image_urls", [])
69
+ image_base64 = data.get("image_base64", [])
70
+ image_urls = [image_urls] if isinstance(image_urls, str) else image_urls
71
+
72
+ if not texts and not image_urls:
73
+ raise ValueError("Missing 'texts' and/or 'image_urls' in the request.")
74
+
75
+ images = []
76
+ if image_urls:
77
+ for url in image_urls:
78
+ try:
79
+ response = requests.get(url, stream=True)
80
+ response.raise_for_status()
81
+ image = Image.open(BytesIO(response.content)).convert("RGB")
82
+ images.append(image)
83
+ except Exception as e:
84
+ raise ValueError(f"Error loading image from URL: {url}. Error: {e}")
85
+
86
+ if image_base64:
+ return texts, image_base64
+ return texts, image_urls
89
+
90
+ def inference(self, model_input):
91
+ res = {"text_embeddings": [], "image_embeddings": []}
92
+
93
+ texts, images = model_input
94
+ with torch.no_grad():
95
+ if texts:
96
+ res['text_embeddings'] = self.model.encode_text(texts)
97
+ if images:
98
+ res['image_embeddings'] = self.model.encode_image(images)
99
+ return res
100
+
101
+ def postprocess(self, inference_output):
102
+ for k, v in inference_output.items():
103
+ if len(v) > 0:
104
+ inference_output[k] = [i.tolist() for i in v]
105
+ return [inference_output]
106
+
107
+ def handle(self, data, context):
108
+ """
109
+ Invoke by TorchServe for prediction request.
110
+ Do pre-processing of data, prediction using model and post-processing of prediction output
111
+ :param data: Input data for prediction
112
+ :param context: Initial context contains model server system properties.
113
+ :return: prediction output
114
+ """
115
+
116
+ model_input = self.preprocess(data)
117
+ model_output = self.inference(model_input)
118
+ return self.postprocess(model_output)
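An example request payload this handler accepts (a sketch of the structure `preprocess()` expects; the URL is a placeholder):

```python
# Sketch of a TorchServe request body for JinaClipHandler.
payload = [{
    "texts": ["a photo of a cat"],
    "image_urls": ["https://example.com/cat.jpg"],  # placeholder URL
}]
# handle(payload, context) returns
# [{"text_embeddings": [[...]], "image_embeddings": [[...]]}]
```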
mha.py ADDED
@@ -0,0 +1,821 @@
1
+ # Copyright (c) 2023, Tri Dao.
2
+
3
+ """"
4
+ The implementation was adapted from
5
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0
6
+ with modifications to:
7
+ - support QK normalization
8
+ - make ALiBi run with MHA (needed to cast alibi slopes to fp32)
9
+ - make ALiBi run on CPU
10
+ """
11
+
12
+ import math
13
+ from functools import partial
14
+
15
+ import torch
16
+ import torch.nn as nn
17
+ from einops import rearrange, repeat
18
+
19
+ try:
20
+ from flash_attn import (
21
+ flash_attn_kvpacked_func,
22
+ flash_attn_qkvpacked_func,
23
+ flash_attn_varlen_kvpacked_func,
24
+ flash_attn_varlen_qkvpacked_func,
25
+ flash_attn_with_kvcache,
26
+ )
27
+ except ImportError:
28
+ flash_attn_varlen_qkvpacked_func, flash_attn_varlen_kvpacked_func = None, None
29
+ flash_attn_qkvpacked_func, flash_attn_kvpacked_func = None, None
30
+ flash_attn_with_kvcache = None
31
+
32
+ try:
33
+ from flash_attn.ops.fused_dense import ColumnParallelLinear, FusedDense, RowParallelLinear
34
+ except ImportError:
35
+ FusedDense, ColumnParallelLinear, RowParallelLinear = None, None, None
36
+
37
+ try:
38
+ from flash_attn.layers.rotary import RotaryEmbedding
39
+ except ImportError:
40
+ RotaryEmbedding = None
41
+
42
+
43
+ # From https://github.com/ofirpress/attention_with_linear_biases/blob/4b92f28a005ead2567abe2359f633e73e08f3833/fairseq/models/transformer.py#L742
44
+ def get_alibi_slopes(nheads):
45
+ def get_slopes_power_of_2(nheads):
46
+ start = 2 ** (-(2 ** -(math.log2(nheads) - 3)))
47
+ ratio = start
48
+ return [start * ratio**i for i in range(nheads)]
49
+
50
+ if math.log2(nheads).is_integer():
51
+ return get_slopes_power_of_2(nheads)
52
+ else:
53
+ closest_power_of_2 = 2 ** math.floor(math.log2(nheads))
54
+ return (
55
+ get_slopes_power_of_2(closest_power_of_2)
56
+ + get_alibi_slopes(2 * closest_power_of_2)[0::2][: nheads - closest_power_of_2]
57
+ )
58
+
59
+ class MultiHeadLayernorm(nn.Module):
60
+ def __init__(self, head_dim, num_heads, eps=1e-05, shared_normalization=False):
61
+ super().__init__()
62
+ if shared_normalization:
63
+ self._reduce_dims = (-2, -1)
64
+ else:
65
+ self._reduce_dims = (-1,)
66
+ self.weight = nn.Parameter(torch.ones((num_heads, head_dim)))
67
+ self.bias = nn.Parameter(torch.zeros((num_heads, head_dim)))
68
+ self.eps = eps
69
+
70
+ def forward(self, x):
71
+ var, mean = torch.var_mean(x, dim=self._reduce_dims, keepdim=True)
72
+ x = (x - mean) / torch.sqrt(var + self.eps)
73
+ return self.weight * x + self.bias
74
+
75
+ class FlashSelfAttention(nn.Module):
76
+ """Implement the scaled dot product attention with softmax.
77
+ Arguments
78
+ ---------
79
+ softmax_scale: The temperature to use for the softmax attention.
80
+ (default: 1/sqrt(d_keys) where d_keys is computed at
81
+ runtime)
82
+ attention_dropout: The dropout rate to apply to the attention
83
+ (default: 0.0)
84
+ """
85
+
86
+ def __init__(
87
+ self,
88
+ causal=False,
89
+ softmax_scale=None,
90
+ attention_dropout=0.0,
91
+ window_size=(-1, -1),
92
+ alibi_slopes=None,
93
+ deterministic=False,
94
+ qk_norm_kwargs=None,
95
+ ):
96
+ super().__init__()
97
+ assert flash_attn_varlen_qkvpacked_func is not None, "FlashAttention is not installed"
98
+ assert flash_attn_qkvpacked_func is not None, "FlashAttention is not installed"
99
+ self.causal = causal
100
+ self.softmax_scale = softmax_scale
101
+ self.drop = nn.Dropout(attention_dropout)
102
+ self.register_buffer("alibi_slopes", alibi_slopes, persistent=False)
103
+ self.window_size = window_size
104
+ self.deterministic = deterministic
105
+ if qk_norm_kwargs is not None:
106
+ self.qk_norm = True
107
+ self.q_layernorm = MultiHeadLayernorm(**qk_norm_kwargs)
108
+ self.k_layernorm = MultiHeadLayernorm(**qk_norm_kwargs)
109
+ else:
110
+ self.qk_norm = False
111
+ self.q_layernorm = None
112
+ self.k_layernorm = None
113
+
114
+ def forward(self, qkv, causal=None, cu_seqlens=None, max_seqlen=None):
115
+ """Implements the multihead softmax attention.
116
+ Arguments
117
+ ---------
118
+ qkv: The tensor containing the query, key, and value.
119
+ If cu_seqlens is None and max_seqlen is None, then qkv has shape (B, S, 3, H, D).
120
+ If cu_seqlens is not None and max_seqlen is not None, then qkv has shape
121
+ (total, 3, H, D), where total is the sum of the sequence lengths in the batch.
122
+ causal: if passed, will override self.causal
123
+ cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
124
+ of the sequences in the batch, used to index into qkv.
125
+ max_seqlen: int. Maximum sequence length in the batch.
126
+ Returns:
127
+ --------
128
+ out: (total, H, D) if cu_seqlens is not None and max_seqlen is not None,
129
+ else (B, S, H, D).
130
+ """
131
+ assert qkv.dtype in [torch.float16, torch.bfloat16]
132
+ assert qkv.is_cuda
133
+ if self.qk_norm:
134
+ if cu_seqlens is None:
135
+ assert qkv.shape[2] == 3
136
+ q, k, v = qkv.unbind(2)
137
+ q = self.q_layernorm(q)
138
+ k = self.k_layernorm(k)
139
+ qkv = torch.stack([q, k, v], dim=2)
140
+ else:
141
+ assert qkv.shape[1] == 3
142
+ q, k, v = qkv.unbind(1)
143
+ q = self.q_layernorm(q)
144
+ k = self.k_layernorm(k)
145
+ qkv = torch.stack([q, k, v], dim=1)
146
+ causal = self.causal if causal is None else causal
147
+ unpadded = cu_seqlens is not None
148
+ if self.alibi_slopes is not None:
149
+ self.alibi_slopes = self.alibi_slopes.to(torch.float32)
150
+ if unpadded:
151
+ assert cu_seqlens.dtype == torch.int32
152
+ assert max_seqlen is not None
153
+ assert isinstance(max_seqlen, int)
154
+ return flash_attn_varlen_qkvpacked_func(
155
+ qkv,
156
+ cu_seqlens,
157
+ max_seqlen,
158
+ self.drop.p if self.training else 0.0,
159
+ softmax_scale=self.softmax_scale,
160
+ causal=causal,
161
+ alibi_slopes=self.alibi_slopes,
162
+ window_size=self.window_size,
163
+ deterministic=self.deterministic,
164
+ )
165
+ else:
166
+ return flash_attn_qkvpacked_func(
167
+ qkv,
168
+ self.drop.p if self.training else 0.0,
169
+ softmax_scale=self.softmax_scale,
170
+ causal=causal,
171
+ alibi_slopes=self.alibi_slopes,
172
+ window_size=self.window_size,
173
+ deterministic=self.deterministic,
174
+ )
175
+
176
+
177
+ class FlashCrossAttention(nn.Module):
178
+ """Implement the scaled dot product attention with softmax.
179
+ Arguments
180
+ ---------
181
+ softmax_scale: The temperature to use for the softmax attention.
182
+ (default: 1/sqrt(d_keys) where d_keys is computed at
183
+ runtime)
184
+ attention_dropout: The dropout rate to apply to the attention
185
+ (default: 0.0)
186
+ """
187
+
188
+ def __init__(
189
+ self,
190
+ causal=False,
191
+ softmax_scale=None,
192
+ attention_dropout=0.0,
193
+ alibi_slopes=None,
194
+ window_size=(-1, -1),
195
+ deterministic=False,
196
+ ):
197
+ super().__init__()
198
+ assert flash_attn_varlen_kvpacked_func is not None, "FlashAttention is not installed"
199
+ assert flash_attn_kvpacked_func is not None, "FlashAttention is not installed"
200
+ self.causal = causal
201
+ self.softmax_scale = softmax_scale
202
+ self.drop = nn.Dropout(attention_dropout)
203
+ self.register_buffer("alibi_slopes", alibi_slopes, persistent=False)
204
+ self.window_size = window_size
205
+ self.deterministic = deterministic
206
+
207
+ def forward(
208
+ self,
209
+ q,
210
+ kv,
211
+ causal=None,
212
+ cu_seqlens=None,
213
+ max_seqlen=None,
214
+ cu_seqlens_k=None,
215
+ max_seqlen_k=None,
216
+ ):
217
+ """Implements the multihead softmax attention.
218
+ Arguments
219
+ ---------
220
+ q: The tensor containing the query. (B, Sq, H, D)
221
+ kv: The tensor containing the key and value. (B, Sk, 2, H_k, D)
222
+ causal: if passed, will override self.causal
223
+ cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
224
+ of the sequences in the batch, used to index into q.
225
+ max_seqlen: int. Maximum sequence length in the batch of q.
226
+ cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
227
+ of the sequences in the batch, used to index into kv.
228
+ max_seqlen_k: int. Maximum sequence length in the batch of k and v.
229
+ """
230
+ assert q.dtype in [torch.float16, torch.bfloat16]
231
+ assert q.is_cuda and kv.is_cuda
232
+ causal = self.causal if causal is None else causal
233
+ unpadded = cu_seqlens is not None
234
+ if self.alibi_slopes is not None:
235
+ self.alibi_slopes = self.alibi_slopes.to(torch.float32)
236
+ if unpadded:
237
+ assert cu_seqlens.dtype == torch.int32
238
+ assert max_seqlen is not None
239
+ assert isinstance(max_seqlen, int)
240
+ assert cu_seqlens_k is not None
241
+ assert cu_seqlens_k.dtype == torch.int32
242
+ assert max_seqlen_k is not None
243
+             assert isinstance(max_seqlen_k, int)
244
+ return flash_attn_varlen_kvpacked_func(
245
+ q,
246
+ kv,
247
+ cu_seqlens,
248
+ cu_seqlens_k,
249
+ max_seqlen,
250
+ max_seqlen_k,
251
+ self.drop.p if self.training else 0.0,
252
+ softmax_scale=self.softmax_scale,
253
+ causal=causal,
254
+ alibi_slopes=self.alibi_slopes,
255
+ window_size=self.window_size,
256
+ deterministic=self.deterministic,
257
+ )
258
+ else:
259
+ batch_size, seqlen_q = q.shape[0], q.shape[1]
260
+ seqlen_k = kv.shape[1]
261
+ assert kv.shape[0] == batch_size and kv.shape[4] == q.shape[3]
262
+ return flash_attn_kvpacked_func(
263
+ q,
264
+ kv,
265
+ self.drop.p if self.training else 0.0,
266
+ causal=causal,
267
+ softmax_scale=self.softmax_scale,
268
+ alibi_slopes=self.alibi_slopes,
269
+ window_size=self.window_size,
270
+ deterministic=self.deterministic,
271
+ )
272
+
273
+
274
+ class SelfAttention(nn.Module):
275
+ """Implement the scaled dot product attention with softmax.
276
+ Arguments
277
+ ---------
278
+ softmax_scale: The temperature to use for the softmax attention.
279
+ (default: 1/sqrt(d_keys) where d_keys is computed at
280
+ runtime)
281
+ attention_dropout: The dropout rate to apply to the attention
282
+ (default: 0.0)
283
+ """
284
+ def __init__(self,
285
+ causal=False,
286
+ softmax_scale=None,
287
+ attention_dropout=0.0,
288
+ alibi_slopes=None,
289
+ qk_norm_kwargs=None,
290
+ ):
291
+ super().__init__()
292
+ self.causal = causal
293
+ self.softmax_scale = softmax_scale
294
+ self.drop = nn.Dropout(attention_dropout)
295
+ self.register_buffer('alibi_slopes', alibi_slopes, persistent=False)
296
+ if alibi_slopes is not None:
297
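+             # Precompute a small ALiBi bias table (seqlen 16); forward() rebuilds it lazily whenever the actual sequence is longer.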
+ self.register_buffer('linear_biases', self._build_linear_biases(16), persistent=False)
298
+ else:
299
+ self.linear_biases = None
300
+ if qk_norm_kwargs is not None:
301
+ self.qk_norm = True
302
+ self.q_layernorm = MultiHeadLayernorm(**qk_norm_kwargs)
303
+ self.k_layernorm = MultiHeadLayernorm(**qk_norm_kwargs)
304
+ else:
305
+ self.qk_norm = False
306
+ self.q_layernorm = None
307
+ self.k_layernorm = None
308
+
309
+ def _build_linear_biases(self, seqlen):
310
+ context_position = torch.arange(seqlen, device=self.alibi_slopes.device)[:, None]
311
+ memory_position = torch.arange(seqlen, device=self.alibi_slopes.device)[None, :]
312
+ # distance tensor is of shape (seqlen, seqlen)
313
+ distance = torch.abs(memory_position - context_position)
314
+ # alibi tensor is of shape (1, H, seqlen, seqlen)
315
+ linear_biases = (distance[None, ...] * self.alibi_slopes[:, None, None])[None, ...]
316
+ return linear_biases
317
+
318
+ def forward(self, qkv, causal=None, key_padding_mask=None):
319
+ """Implements the multihead softmax attention.
320
+ Arguments
321
+ ---------
322
+ qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
323
+ causal: if passed, will override self.causal
324
+ key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
325
+ False means to mask out. (B, S)
326
+ """
327
+ batch_size, seqlen = qkv.shape[0], qkv.shape[1]
328
+ causal = self.causal if causal is None else causal
329
+ q, k, v = qkv.unbind(dim=2)
330
+ if self.qk_norm:
331
+ q = self.q_layernorm(q)
332
+ k = self.k_layernorm(k)
333
+ softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
334
+ scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)
335
+ if key_padding_mask is not None:
336
+ padding_mask = torch.full(
337
+ (batch_size, seqlen), -10000.0, dtype=scores.dtype, device=scores.device
338
+ )
339
+ padding_mask.masked_fill_(key_padding_mask, 0.0)
340
+ # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
341
+ scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")
342
+ if self.alibi_slopes is not None:
343
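+             # ALiBi: subtract a per-head penalty that grows linearly with the |query - key| distance.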
+ if seqlen > self.linear_biases.shape[-1]:
344
+ self.linear_biases = self._build_linear_biases(seqlen)
345
+ cropped_biases = self.linear_biases[..., :seqlen, :seqlen]
346
+ scores = scores - cropped_biases
347
+ if causal:
348
+ # "triu_tril_cuda_template" not implemented for 'BFloat16'
349
+ # So we have to construct the mask in float
350
+ causal_mask = torch.triu(
351
+ torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1
352
+ )
353
+ # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
354
+ scores = scores + causal_mask.to(dtype=scores.dtype)
355
+ attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
356
+ attention_drop = self.drop(attention)
357
+ output = torch.einsum("bhts,bshd->bthd", attention_drop, v)
358
+ return output
359
+
360
+
361
+ class CrossAttention(nn.Module):
362
+ """Implement the scaled dot product attention with softmax.
363
+ Arguments
364
+ ---------
365
+ softmax_scale: The temperature to use for the softmax attention.
366
+ (default: 1/sqrt(d_keys) where d_keys is computed at
367
+ runtime)
368
+ attention_dropout: The dropout rate to apply to the attention
369
+ (default: 0.0)
370
+ """
371
+
372
+ def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
373
+ super().__init__()
374
+ self.causal = causal
375
+ self.softmax_scale = softmax_scale
376
+ self.drop = nn.Dropout(attention_dropout)
377
+
378
+ def forward(self, q, kv, causal=None, key_padding_mask=None):
379
+ """Implements the multihead softmax attention.
380
+ Arguments
381
+ ---------
382
+ q: The tensor containing the query. (B, Sq, H, D)
383
+ kv: The tensor containing the key and value. (B, Sk, 2, H_k, D)
384
+ causal: if passed, will override self.causal
385
+ key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
386
+ False means to mask out. (B, Sk)
387
+ """
388
+ batch_size, seqlen_q = q.shape[0], q.shape[1]
389
+ causal = self.causal if causal is None else causal
390
+ seqlen_k = kv.shape[1]
391
+ assert kv.shape[0] == batch_size and kv.shape[4] == q.shape[3]
392
+ if kv.shape[3] != q.shape[2]: # MQA/GQA
393
+ kv = repeat(kv, "... hkv d -> ... (hkv g) d", g=q.shape[2] // kv.shape[3])
394
+ k, v = kv.unbind(dim=2)
395
+ softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
396
+ scores = torch.einsum("bthd,bshd->bhts", q, k * softmax_scale)
397
+ if key_padding_mask is not None:
398
+ padding_mask = torch.full(
399
+ (batch_size, seqlen_k), -10000.0, dtype=scores.dtype, device=scores.device
400
+ )
401
+ padding_mask.masked_fill_(key_padding_mask, 0.0)
402
+ # TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
403
+ scores = scores + rearrange(padding_mask, "b s -> b 1 1 s")
404
+ if causal:
405
+ # causal mask needs to take into account the difference between seqlen_q and seqlen_k
406
+ row_idx = rearrange(
407
+ torch.arange(seqlen_q, device=q.device, dtype=torch.long), "s -> s 1"
408
+ )
409
+ col_idx = torch.arange(seqlen_k, device=kv.device, dtype=torch.long)
410
+ sk = (
411
+ seqlen_k
412
+ if key_padding_mask is None
413
+ else rearrange(key_padding_mask.sum(-1), "b -> b 1 1 1")
414
+ )
415
+ causal_mask = col_idx > row_idx + sk - seqlen_q
416
+ scores = scores.masked_fill(causal_mask, -10000.0)
417
+ attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
418
+ attention_drop = self.drop(attention)
419
+ output = torch.einsum("bhts,bshd->bthd", attention_drop, v)
420
+ return output
421
+
422
+
423
+ class LinearResidual(nn.Linear):
424
+ """Wrap nn.Linear to return the residual as well. For compatibility with FusedDense."""
425
+
426
+ def forward(self, input: torch.Tensor) -> torch.Tensor:
427
+ return super().forward(input), input
428
+
429
+
430
+ def _update_kv_cache(kv, inference_params, layer_idx):
431
+ """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)"""
432
+ # Pre-allocate memory for key-values for inference.
433
+ num_heads, head_dim = kv.shape[-2:]
434
+ if layer_idx not in inference_params.key_value_memory_dict:
435
+ kv_cache = torch.empty(
436
+ inference_params.max_batch_size,
437
+ inference_params.max_seqlen,
438
+ 2,
439
+ num_heads,
440
+ head_dim,
441
+ dtype=kv.dtype,
442
+ device=kv.device,
443
+ )
444
+ inference_params.key_value_memory_dict[layer_idx] = kv_cache
445
+ else:
446
+ kv_cache = inference_params.key_value_memory_dict[layer_idx]
447
+ # Adjust key and value for inference
448
+ batch_start = inference_params.batch_size_offset
449
+ batch_end = batch_start + kv.shape[0]
450
+ sequence_start = inference_params.seqlen_offset
451
+ sequence_end = sequence_start + kv.shape[1]
452
+ assert batch_end <= kv_cache.shape[0]
453
+ assert sequence_end <= kv_cache.shape[1]
454
+ assert kv_cache is not None
455
+ kv_cache[batch_start:batch_end, sequence_start:sequence_end, ...] = kv
456
+ return kv_cache[batch_start:batch_end, :sequence_end, ...]
457
+
458
+
459
+ class MHA(nn.Module):
460
+ """Multi-head self-attention and cross-attention"""
461
+
462
+ def __init__(
463
+ self,
464
+ embed_dim,
465
+ num_heads,
466
+ num_heads_kv=None,
467
+ cross_attn=False,
468
+ qkv_proj_bias=True,
469
+ out_proj_bias=True,
470
+ dropout=0.0,
471
+ softmax_scale=None,
472
+ causal=False,
473
+ layer_idx=None,
474
+ dwconv=False,
475
+ rotary_emb_dim=0,
476
+ rotary_emb_base=10000.0,
477
+ rotary_emb_scale_base=None,
478
+ rotary_emb_interleaved=False,
479
+ use_alibi=False,
480
+ window_size=(-1, -1),
481
+ fused_bias_fc=False,
482
+ use_flash_attn=False,
483
+ return_residual=False,
484
+ checkpointing=False,
485
+ device=None,
486
+ dtype=None,
487
+ qk_norm=False,
488
+ qk_norm_kwargs=None,
489
+ ) -> None:
490
+ """
491
+ num_heads_kv: can be used to toggle MQA / GQA. If None, use num_heads.
492
+ return_residual: whether to return the input x along with the output. This is for
493
+ performance reason: for post-norm architecture, returning the input allows us
494
+ to fuse the backward of nn.Linear with the residual connection.
495
+ """
496
+ if qk_norm and cross_attn:
497
+ raise NotImplementedError('QK normalization is only implemented for self-attention.')
498
+ if qk_norm:
499
+ qk_norm_kwargs = qk_norm_kwargs if qk_norm_kwargs is not None else {}
500
+ qk_norm_kwargs.update({'num_heads': num_heads, 'head_dim': embed_dim // num_heads})
501
+ factory_kwargs = {"device": device, "dtype": dtype}
502
+ super().__init__()
503
+ self.embed_dim = embed_dim
504
+ self.cross_attn = cross_attn
505
+ self.causal = causal
506
+ self.layer_idx = layer_idx
507
+ self.dwconv = dwconv
508
+ self.rotary_emb_dim = rotary_emb_dim
509
+ self.use_flash_attn = use_flash_attn
510
+ self.return_residual = return_residual
511
+ self.checkpointing = checkpointing
512
+ if use_alibi:
513
+ assert not cross_attn or use_flash_attn, "ALiBi code path requires self-attention or cross-attention with flash_attn"
514
+ alibi_slopes = torch.tensor(get_alibi_slopes(num_heads), device=device)
515
+ else:
516
+ alibi_slopes = None
517
+
518
+ if isinstance(window_size, list):
519
+ window_size = tuple(window_size)
520
+
521
+ if window_size != (-1, -1):
522
+ assert use_flash_attn, "Local (sliding window) attention code path requires flash_attn"
523
+
524
+ self.num_heads = num_heads
525
+ self.num_heads_kv = num_heads_kv if num_heads_kv is not None else num_heads
526
+ assert (
527
+ self.num_heads % self.num_heads_kv == 0
528
+ ), "num_heads must be divisible by num_heads_kv"
529
+ assert self.embed_dim % num_heads == 0, "embed_dim must be divisible by num_heads"
530
+ self.head_dim = self.embed_dim // num_heads
531
+ qkv_dim = self.head_dim * (self.num_heads + 2 * self.num_heads_kv)
532
+ kv_dim = 2 * self.head_dim * self.num_heads_kv
533
+
534
+ if self.rotary_emb_dim > 0:
535
+ assert not cross_attn, "MHA with rotary embedding does not support cross-attention yet"
536
+ assert RotaryEmbedding is not None, "rotary_emb is not installed"
537
+ self.rotary_emb = RotaryEmbedding(
538
+ self.rotary_emb_dim,
539
+ base=rotary_emb_base,
540
+ scale_base=rotary_emb_scale_base,
541
+ interleaved=rotary_emb_interleaved,
542
+ device=device,
543
+ )
544
+
545
+ if fused_bias_fc and FusedDense is None:
546
+ raise ImportError("fused_dense is not installed")
547
+ linear_cls = nn.Linear if not fused_bias_fc else FusedDense
548
+ linear_resid_cls = (
549
+ LinearResidual if not fused_bias_fc else partial(FusedDense, return_residual=True)
550
+ )
551
+ wqkv_cls = linear_cls if not self.return_residual else linear_resid_cls
552
+ inner_attn_cls = (
553
+ partial(FlashSelfAttention, alibi_slopes=alibi_slopes, window_size=window_size, qk_norm_kwargs=qk_norm_kwargs)
554
+ if use_flash_attn
555
+ else partial(SelfAttention, alibi_slopes=alibi_slopes, qk_norm_kwargs=qk_norm_kwargs)
556
+ )
557
+ inner_cross_attn_cls = (
558
+ partial(FlashCrossAttention, alibi_slopes=alibi_slopes, window_size=window_size)
559
+ if use_flash_attn
560
+ else CrossAttention
561
+ )
562
+ if not self.cross_attn:
563
+ self.Wqkv = wqkv_cls(embed_dim, qkv_dim, bias=qkv_proj_bias, **factory_kwargs)
564
+ else:
565
+ self.Wq = linear_cls(embed_dim, embed_dim, bias=qkv_proj_bias, **factory_kwargs)
566
+ self.Wkv = wqkv_cls(embed_dim, kv_dim, bias=qkv_proj_bias, **factory_kwargs)
567
+ if self.dwconv:
568
+ if self.num_heads_kv == self.num_heads:
569
+ self.dwconv_qkv = nn.Conv1d(
570
+ qkv_dim, qkv_dim, kernel_size=3, padding=2, groups=qkv_dim
571
+ )
572
+ else:
573
+ self.dwconv_q = nn.Conv1d(
574
+ embed_dim, embed_dim, kernel_size=3, padding=2, groups=embed_dim
575
+ )
576
+ self.dwconv_kv = nn.Conv1d(kv_dim, kv_dim, kernel_size=3, padding=2, groups=kv_dim)
577
+ self.inner_attn = inner_attn_cls(
578
+ causal=causal,
579
+ softmax_scale=softmax_scale,
580
+ attention_dropout=dropout,
581
+ )
582
+ self.inner_cross_attn = inner_cross_attn_cls(
583
+ causal=causal, softmax_scale=softmax_scale, attention_dropout=dropout
584
+ )
585
+ self.out_proj = linear_cls(embed_dim, embed_dim, bias=out_proj_bias, **factory_kwargs)
586
+
587
+ def allocate_inference_cache(self, batch_size, max_seqlen, dtype=None):
588
+ dtype = self.out_proj.weight.dtype if dtype is None else dtype
589
+ device = self.out_proj.weight.device
590
+ return torch.empty(
591
+ batch_size,
592
+ max_seqlen,
593
+ 2,
594
+ self.num_heads_kv,
595
+ self.head_dim,
596
+ dtype=dtype,
597
+ device=device,
598
+ )
599
+
600
+ def _update_kv_cache(self, kv, inference_params):
601
+ """kv: (batch_size, seqlen, 2, nheads, head_dim) or (batch_size, 1, 2, nheads, head_dim)"""
602
+ assert not self.dwconv, "Generation does not support dwconv yet"
603
+ assert self.layer_idx is not None, "Generation requires layer_idx in the constructor"
604
+ return _update_kv_cache(kv, inference_params, self.layer_idx)
605
+
606
+ def _apply_rotary_update_kvcache_attention(self, q, kv, inference_params):
607
+ """
608
+         Fast path that combines 3 steps: apply rotary to Q and K, update the kv cache, and apply attention.
609
+ q: (batch_size, seqlen_q, nheads, head_dim)
610
+ kv: (batch_size, seqlen_k, 2, nheads_kv, head_dim)
611
+ """
612
+ assert inference_params is not None and inference_params.seqlen_offset > 0
613
+ assert self.use_flash_attn
614
+ if self.rotary_emb_dim > 0:
615
+ assert self.rotary_emb.scale is None, "This code path does not support xPos"
616
+ self.rotary_emb._update_cos_sin_cache(
617
+ inference_params.max_seqlen, device=q.device, dtype=q.dtype
618
+ )
619
+ rotary_cos, rotary_sin = self.rotary_emb._cos_cached, self.rotary_emb._sin_cached
620
+ else:
621
+ rotary_cos, rotary_sin = None, None
622
+ batch = q.shape[0]
623
+ kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch]
624
+ cache_seqlens = (
625
+ inference_params.lengths_per_sample[:batch]
626
+ if inference_params.lengths_per_sample is not None
627
+ else inference_params.seqlen_offset
628
+ )
629
+ alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None)
630
+ context = flash_attn_with_kvcache(
631
+ q,
632
+ kv_cache[:, :, 0],
633
+ kv_cache[:, :, 1],
634
+ kv[:, :, 0],
635
+ kv[:, :, 1],
636
+ rotary_cos=rotary_cos,
637
+ rotary_sin=rotary_sin,
638
+ cache_seqlens=cache_seqlens,
639
+ softmax_scale=self.inner_cross_attn.softmax_scale,
640
+ causal=self.inner_cross_attn.causal,
641
+ rotary_interleaved=self.rotary_emb.interleaved if self.rotary_emb_dim > 0 else False,
642
+ alibi_slopes=alibi_slopes,
643
+ )
644
+ return context
645
+
646
+ def _update_kvcache_attention(self, q, kv, inference_params):
647
+ """Write kv to inference_params, then do attention"""
648
+ if (
649
+ inference_params.seqlen_offset == 0
650
+ or flash_attn_with_kvcache is None
651
+ or not self.use_flash_attn
652
+ ):
653
+ # TODO: this only uses seqlen_offset and not lengths_per_sample.
654
+ kv = self._update_kv_cache(kv, inference_params)
655
+ return self.inner_cross_attn(q, kv)
656
+ else:
657
+ batch = q.shape[0]
658
+ kv_cache = inference_params.key_value_memory_dict[self.layer_idx][:batch]
659
+ cache_seqlens = (
660
+ inference_params.lengths_per_sample[:batch]
661
+ if inference_params.lengths_per_sample is not None
662
+ else inference_params.seqlen_offset
663
+ )
664
+ alibi_slopes = getattr(self.inner_cross_attn, "alibi_slopes", None)
665
+ return flash_attn_with_kvcache(
666
+ q,
667
+ kv_cache[:, :, 0],
668
+ kv_cache[:, :, 1],
669
+ kv[:, :, 0],
670
+ kv[:, :, 1],
671
+ cache_seqlens=cache_seqlens,
672
+ softmax_scale=self.inner_cross_attn.softmax_scale,
673
+ causal=self.inner_cross_attn.causal,
674
+ alibi_slopes=alibi_slopes,
675
+ )
676
+
677
+ def forward(
678
+ self,
679
+ x,
680
+ x_kv=None,
681
+ key_padding_mask=None,
682
+ cu_seqlens=None,
683
+ max_seqlen=None,
684
+ mixer_subset=None,
685
+ inference_params=None,
686
+ **kwargs,
687
+ ):
688
+ """
689
+ Arguments:
690
+ x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if
691
+ cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total
692
+                 is the sum of the sequence lengths in the batch.
693
+ x_kv: (batch, seqlen, hidden_dim), only applicable for cross-attention. If None, use x.
694
+ cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
695
+ of the sequences in the batch, used to index into x. Only applicable when using
696
+ FlashAttention.
697
+ max_seqlen: int. Maximum sequence length in the batch.
698
+ key_padding_mask: boolean mask, True means to keep, False means to mask out.
699
+ (batch, seqlen). Only applicable when not using FlashAttention.
700
+ mixer_subset: for cross-attention only. If not None, will take a subset of x
701
+                 before applying the query projection. Useful e.g. for ViT, where we only care
702
+ about the CLS token in the last layer.
703
+ inference_params: for generation. Adapted from Megatron-LM (and Apex)
704
+ https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470
705
+ """
706
+ if cu_seqlens is not None:
707
+ assert max_seqlen is not None
708
+ assert key_padding_mask is None
709
+ assert self.use_flash_attn
710
+ assert not self.dwconv
711
+ assert self.rotary_emb_dim == 0
712
+ if key_padding_mask is not None:
713
+ assert cu_seqlens is None
714
+ assert max_seqlen is None
715
+ assert not self.use_flash_attn
716
+ if inference_params is not None:
717
+ assert key_padding_mask is None
718
+ assert cu_seqlens is None and max_seqlen is None
719
+ assert not self.dwconv
720
+
721
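+         # Flash kernels take cu_seqlens/max_seqlen; the reference (non-flash) attention takes key_padding_mask instead.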
+ kwargs = (
722
+ {"cu_seqlens": cu_seqlens, "max_seqlen": max_seqlen, **kwargs}
723
+ if self.use_flash_attn
724
+ else {"key_padding_mask": key_padding_mask, **kwargs}
725
+ )
726
+ seqlen_offset = (
727
+ 0
728
+ if inference_params is None
729
+ else (
730
+ inference_params.lengths_per_sample
731
+ if inference_params.lengths_per_sample is not None
732
+ else inference_params.seqlen_offset
733
+ )
734
+ )
735
+ rotary_max_seqlen = inference_params.max_seqlen if inference_params is not None else None
736
+ batch, seqlen = x.shape[:2]
737
+ if not self.cross_attn and self.num_heads_kv == self.num_heads:
738
+ assert x_kv is None and mixer_subset is None
739
+ if not self.return_residual:
740
+ qkv = self.Wqkv(x)
741
+ else:
742
+ qkv, x = self.Wqkv(x)
743
+ if self.dwconv:
744
+ qkv = rearrange(
745
+ self.dwconv_qkv(rearrange(qkv, "b s d -> b d s"))[..., :-2], "b d s -> b s d"
746
+ ).contiguous()
747
+ qkv = rearrange(qkv, "... (three h d) -> ... three h d", three=3, d=self.head_dim)
748
+ if (
749
+ inference_params is None
750
+ or inference_params.seqlen_offset == 0
751
+ or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0)
752
+ or not self.use_flash_attn
753
+ ):
754
+ if self.rotary_emb_dim > 0:
755
+ qkv = self.rotary_emb(
756
+ qkv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen
757
+ )
758
+ if inference_params is None:
759
+ if not self.checkpointing:
760
+ context = self.inner_attn(qkv, **kwargs)
761
+ else:
762
+ context = torch.utils.checkpoint.checkpoint(self.inner_attn, qkv, use_reentrant=False, **kwargs)
763
+ else:
764
+ context = self._update_kvcache_attention(
765
+ qkv[:, :, 0], qkv[:, :, 1:], inference_params
766
+ )
767
+ else:
768
+ context = self._apply_rotary_update_kvcache_attention(
769
+ qkv[:, :, 0], qkv[:, :, 1:], inference_params
770
+ )
771
+ else:
772
+ if self.cross_attn:
773
+ if not self.return_residual:
774
+ q = self.Wq(x if mixer_subset is None else x[:, mixer_subset])
775
+ kv = self.Wkv(x_kv if x_kv is not None else x)
776
+ else:
777
+ if x_kv is not None:
778
+ kv, x_kv = self.Wkv(x_kv)
779
+ else:
780
+ kv, x = self.Wkv(x)
781
+ q = self.Wq(x if mixer_subset is None else x[:, mixer_subset])
782
+ else:
783
+ assert self.num_heads_kv != self.num_heads
784
+ if not self.return_residual:
785
+ qkv = self.Wqkv(x)
786
+ else:
787
+ qkv, x = self.Wqkv(x)
788
+ q = qkv[..., : self.num_heads * self.head_dim]
789
+ kv = qkv[..., self.num_heads * self.head_dim :]
790
+ q = rearrange(q, "... (h d) -> ... h d", d=self.head_dim)
791
+ kv = rearrange(kv, "... (two hkv d) -> ... two hkv d", two=2, d=self.head_dim)
792
+ if self.dwconv:
793
+ q = rearrange(
794
+ self.dwconv_q(rearrange(q, "b s d -> b d s"))[..., :-2], "b d s -> b s d"
795
+ ).contiguous()
796
+ kv = rearrange(
797
+ self.dwconv_kv(rearrange(kv, "b s d -> b d s"))[..., :-2], "b d s -> b s d"
798
+ ).contiguous()
799
+ if (
800
+ inference_params is None
801
+ or inference_params.seqlen_offset == 0
802
+ or (self.rotary_emb_dim == 0 or self.rotary_emb_dim % 16 != 0)
803
+ or not self.use_flash_attn
804
+ ):
805
+ if self.rotary_emb_dim > 0:
806
+ q, kv = self.rotary_emb(
807
+ q, kv, seqlen_offset=seqlen_offset, max_seqlen=rotary_max_seqlen
808
+ )
809
+ if inference_params is None:
810
+ if not self.checkpointing:
811
+ context = self.inner_cross_attn(q, kv, **kwargs)
812
+ else:
813
+ context = torch.utils.checkpoint.checkpoint(
814
+ self.inner_cross_attn, q, kv, use_reentrant=False, **kwargs
815
+ )
816
+ else:
817
+ context = self._update_kvcache_attention(q, kv, inference_params)
818
+ else:
819
+ context = self._apply_rotary_update_kvcache_attention(q, kv, inference_params)
820
+ out = self.out_proj(rearrange(context, "... h d -> ... (h d)"))
821
+ return out if not self.return_residual else (out, x)
mlp.py ADDED
@@ -0,0 +1,243 @@
1
+ # Copyright (c) 2023, Tri Dao.
2
+
3
+ """"
4
+ The implementation was adapted from
5
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0
6
+ """
7
+
8
+ import torch
9
+ import torch.nn as nn
10
+ import torch.nn.functional as F
11
+ from torch.distributed import ProcessGroup
12
+
13
+
14
+ try:
15
+ from flash_attn.ops.activations import swiglu
16
+ except ImportError:
17
+ swiglu = None
18
+
19
+ try:
20
+ from flash_attn.ops.fused_dense import ColumnParallelLinear, RowParallelLinear
21
+ except ImportError:
22
+ ColumnParallelLinear, RowParallelLinear = None, None
23
+
24
+ try:
25
+ from flash_attn.ops.fused_dense import FusedMLP, ParallelFusedMLP
26
+ except ImportError:
27
+ FusedMLP, ParallelFusedMLP = None, None
28
+
29
+
30
+ class GLUMLP(nn.Module):
31
+ def __init__(
32
+ self,
33
+ in_features,
34
+ hidden_features,
35
+ activation,
36
+ use_flash_attn,
37
+ return_residual=False,
38
+ hidden_dropout_prob=0.1
39
+ ):
40
+ super().__init__()
41
+ self.hidden_features = hidden_features
42
+ self.gated_layers = nn.Linear(
43
+ in_features, hidden_features * 2, bias=False
44
+ )
45
+ if activation == 'relu':
46
+ self.act = nn.ReLU()
47
+ elif activation == 'gelu':
48
+ self.act = nn.GELU()
49
+ else:
50
+ raise ValueError(
51
+ f"activation {activation} not supported"
52
+ )
53
+ self.wo = nn.Linear(hidden_features, in_features)
54
+ self.dropout = nn.Dropout(hidden_dropout_prob)
55
+ self.return_residual = return_residual
56
+ self.use_flash_attn = use_flash_attn
57
+ #self.layernorm = nn.LayerNorm(in_features, eps=layer_norm_eps)
58
+
59
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
60
+ residual_connection = hidden_states
61
+ # compute the activation
62
+ hidden_states = self.gated_layers(hidden_states)
63
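+         # Flash attention runs on unpadded (total_tokens, 2 * hidden) inputs, so the activated and linear halves are split along dim 1; the padded path is (batch, seqlen, 2 * hidden) and splits along dim 2.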
+ if self.use_flash_attn:
64
+ gated = hidden_states[:, : self.hidden_features]
65
+ non_gated = hidden_states[:, self.hidden_features :]
66
+ else:
67
+ gated = hidden_states[:, :, : self.hidden_features]
68
+ non_gated = hidden_states[:, :, self.hidden_features :]
69
+ hidden_states = self.act(gated) * non_gated
70
+ hidden_states = self.dropout(hidden_states)
71
+ # multiply by the second matrix
72
+ hidden_states = self.wo(hidden_states)
73
+ # add the residual connection and post-LN
74
+ # hidden_states = self.layernorm(hidden_states + residual_connection)
75
+ return hidden_states if not self.return_residual else (hidden_states, residual_connection)
76
+
77
+ class Mlp(nn.Module):
78
+ def __init__(
79
+ self,
80
+ in_features,
81
+ hidden_features=None,
82
+ out_features=None,
83
+ activation=F.gelu,
84
+ bias1=True,
85
+ bias2=True,
86
+ return_residual=False,
87
+ device=None,
88
+ dtype=None,
89
+ ):
90
+ factory_kwargs = {"device": device, "dtype": dtype}
91
+ super().__init__()
92
+ out_features = out_features if out_features is not None else in_features
93
+ hidden_features = hidden_features if hidden_features is not None else in_features * 4
94
+ self.return_residual = return_residual
95
+ self.fc1 = nn.Linear(in_features, hidden_features, bias=bias1, **factory_kwargs)
96
+ self.activation = activation
97
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
98
+
99
+ def forward(self, x):
100
+ y = self.fc1(x)
101
+ y = self.activation(y)
102
+ y = self.fc2(y)
103
+ return y if not self.return_residual else (y, x)
104
+
105
+
106
+ class ParallelMLP(nn.Module):
107
+ def __init__(
108
+ self,
109
+ in_features,
110
+ hidden_features=None,
111
+ out_features=None,
112
+ activation=F.gelu,
113
+ process_group: ProcessGroup = None,
114
+ sequence_parallel=True,
115
+ bias1=True,
116
+ bias2=True,
117
+ device=None,
118
+ dtype=None,
119
+ ):
120
+ factory_kwargs = {"device": device, "dtype": dtype}
121
+ super().__init__()
122
+ assert ColumnParallelLinear is not None, "Need to install fused_dense"
123
+ assert RowParallelLinear is not None, "Need to install fused_dense"
124
+ out_features = out_features if out_features is not None else in_features
125
+ hidden_features = hidden_features if hidden_features is not None else in_features * 4
126
+ self.fc1 = ColumnParallelLinear(
127
+ in_features,
128
+ hidden_features,
129
+ process_group,
130
+ bias=bias1,
131
+ sequence_parallel=sequence_parallel,
132
+ **factory_kwargs,
133
+ )
134
+ self.activation = activation
135
+ self.fc2 = RowParallelLinear(
136
+ hidden_features,
137
+ out_features,
138
+ process_group,
139
+ bias=bias2,
140
+ sequence_parallel=sequence_parallel,
141
+ **factory_kwargs,
142
+ )
143
+
144
+ def forward(self, x):
145
+ y = self.fc1(x)
146
+ y = self.activation(y)
147
+ y = self.fc2(y)
148
+ return y
149
+
150
+
151
+ class GatedMlp(nn.Module):
152
+ def __init__(
153
+ self,
154
+ in_features,
155
+ hidden_features=None,
156
+ out_features=None,
157
+ activation=F.sigmoid,
158
+ bias1=True,
159
+ bias2=True,
160
+ multiple_of=128,
161
+ return_residual=False,
162
+ device=None,
163
+ dtype=None,
164
+ ):
165
+ factory_kwargs = {"device": device, "dtype": dtype}
166
+ super().__init__()
167
+ out_features = out_features if out_features is not None else in_features
168
+ hidden_features = (
169
+ hidden_features if hidden_features is not None else int(8 * in_features / 3)
170
+ )
171
+ hidden_features = (hidden_features + multiple_of - 1) // multiple_of * multiple_of
172
+ self.return_residual = return_residual
173
+ self.fc1 = nn.Linear(in_features, 2 * hidden_features, bias=bias1, **factory_kwargs)
174
+ self.activation = activation
175
+ self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
176
+
177
+ def forward(self, x):
178
+ y = self.fc1(x)
179
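+         # fc1 projects to 2 * hidden features: one half is the value, the other half the gate.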
+ if self.activation == F.sigmoid: # Special case for GLU
180
+ y = F.glu(y, dim=-1)
181
+ elif self.activation == F.silu and swiglu is not None: # Special case for SwiGLU
182
+ y, gate = y.chunk(2, dim=-1)
183
+ y = swiglu(gate, y)
184
+ else:
185
+ y, gate = y.chunk(2, dim=-1)
186
+ y = y * self.activation(gate)
187
+ y = self.fc2(y)
188
+ return y if not self.return_residual else (y, x)
189
+
190
+
191
+ class ParallelGatedMlp(nn.Module):
192
+ """Parallel GatedMlp"""
193
+
194
+ def __init__(
195
+ self,
196
+ in_features,
197
+ process_group,
198
+ hidden_features=None,
199
+ out_features=None,
200
+ activation=F.sigmoid,
201
+ bias1=True,
202
+ bias2=True,
203
+ multiple_of=128,
204
+ sequence_parallel=True,
205
+ device=None,
206
+ dtype=None,
207
+ ):
208
+ factory_kwargs = {"device": device, "dtype": dtype}
209
+ super().__init__()
210
+ out_features = out_features if out_features is not None else in_features
211
+ hidden_features = (
212
+ hidden_features if hidden_features is not None else int(8 * in_features / 3)
213
+ )
214
+ hidden_features = (hidden_features + multiple_of - 1) // multiple_of * multiple_of
215
+ if ColumnParallelLinear is None or RowParallelLinear is None:
216
+ raise ImportError("fused_dense is not installed")
217
+ self.fc1 = ColumnParallelLinear(
218
+ in_features,
219
+ 2 * hidden_features,
220
+ process_group,
221
+ bias=bias1,
222
+ sequence_parallel=sequence_parallel,
223
+ **factory_kwargs,
224
+ )
225
+ self.activation = activation
226
+ self.fc2 = RowParallelLinear(
227
+ hidden_features,
228
+ out_features,
229
+ process_group,
230
+ bias=bias2,
231
+ sequence_parallel=sequence_parallel,
232
+ **factory_kwargs,
233
+ )
234
+
235
+ def forward(self, x):
236
+ y = self.fc1(x)
237
+ if self.activation == F.sigmoid: # Special case for GLU
238
+ y = F.glu(y, dim=-1)
239
+ else:
240
+ y, gate = y.chunk(2, dim=-1)
241
+ y = y * self.activation(gate)
242
+ y = self.fc2(y)
243
+ return y
modeling_bert.py ADDED
@@ -0,0 +1,806 @@
1
+ """ Implementation of BERT, using ALiBi and Flash Attention
2
+
3
+ The implementation was adapted from
4
+ https://github.com/Dao-AILab/flash-attention/blob/43950dda456e095969d842fca7a73c5bfe3cecd0/flash_attn/models/bert.py
5
+ with modifications to use ALiBi.
6
+ """
7
+
8
+ # Copyright (c) 2022, Tri Dao.
9
+ # This BERT implementation is based on our MLPerf 2.0 and MLPerf 2.1 BERT implementation.
10
+ # https://github.com/mlcommons/training_results_v2.0/blob/main/HazyResearch/benchmarks/bert/implementations/pytorch/modeling.py
11
+ # https://github.com/mlcommons/training_results_v2.1/blob/main/Azure-HazyResearch/benchmarks/bert/implementations/ND96amsr_A100_v4/modeling.py
12
+
13
+ # Inspired by https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py
14
+
15
+ import logging
16
+ from collections.abc import Sequence
17
+ from functools import partial
18
+ from typing import Union, List, Optional
19
+ import warnings
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.nn as nn
24
+ import torch.nn.functional as F
25
+ from einops import rearrange
26
+ from transformers.modeling_utils import PreTrainedModel
27
+ from .configuration_bert import JinaBertConfig
28
+ from transformers.models.bert.modeling_bert import (
29
+ BaseModelOutputWithPoolingAndCrossAttentions,
30
+ BertForPreTrainingOutput,
31
+ )
32
+ from .bert_padding import (
33
+ index_first_axis,
34
+ index_first_axis_residual,
35
+ pad_input,
36
+ unpad_input,
37
+ )
38
+
39
+ from .block import Block
40
+ from .embedding import BertEmbeddings
41
+ from .mha import MHA
42
+ from .mlp import FusedMLP, Mlp, GLUMLP
43
+
44
+ try:
45
+ from flash_attn.ops.fused_dense import FusedDense
46
+ except ImportError:
47
+ FusedDense = None
48
+
49
+ try:
50
+ from flash_attn.ops.triton.layer_norm import layer_norm_fn
51
+ except ImportError:
52
+ layer_norm_fn = None
53
+
54
+
55
+ try:
56
+ from flash_attn.losses.cross_entropy import CrossEntropyLoss
57
+ except ImportError:
58
+ CrossEntropyLoss = None
59
+
60
+ try:
61
+ from tqdm.autonotebook import trange
62
+ except ImportError:
63
+ trange = None
64
+
65
+ logger = logging.getLogger(__name__)
66
+
67
+
68
+ def create_mixer_cls(config, cross_attn=False, return_residual=False):
69
+ use_flash_attn = config.use_flash_attn if config.use_flash_attn is not None else torch.cuda.is_available()
70
+ use_qk_norm = config.use_qk_norm
71
+ fused_bias_fc = config.fused_bias_fc
72
+ window_size = config.window_size
73
+ mixer_cls = partial(
74
+ MHA,
75
+ num_heads=config.num_attention_heads,
76
+ cross_attn=cross_attn,
77
+ dropout=config.attention_probs_dropout_prob,
78
+ causal=False,
79
+ fused_bias_fc=fused_bias_fc,
80
+ use_flash_attn=use_flash_attn,
81
+ return_residual=return_residual,
82
+ use_alibi=True,
83
+ window_size=window_size,
84
+ qk_norm=use_qk_norm,
85
+ checkpointing=False,
86
+ )
87
+ return mixer_cls
88
+
89
+
90
+ def create_mlp_cls(config, layer_idx=None, return_residual=False):
91
+ inner_dim = config.intermediate_size
92
+ mlp_type = config.mlp_type
93
+ assert mlp_type in ('mlp', 'fused_mlp', 'glu')
94
+ if mlp_type == 'fused_mlp':
95
+ assert config.hidden_act in ["gelu_new", "gelu_fast", "gelu_pytorch_tanh"], (
96
+ "fused_mlp only " "supports approximate gelu"
97
+ )
98
+ if mlp_type == 'glu':
99
+ assert config.hidden_act in ('relu', 'gelu')
100
+ if mlp_type == 'mlp':
101
+ approximate = (
102
+ "tanh"
103
+ if config.hidden_act in ["gelu_new", "gelu_fast", "gelu_pytorch_tanh"]
104
+ else "none"
105
+ )
106
+ mlp_cls = partial(
107
+ Mlp,
108
+ hidden_features=inner_dim,
109
+ activation=partial(F.gelu, approximate=approximate),
110
+ return_residual=return_residual,
111
+ )
112
+ elif mlp_type == 'glu':
113
+ mlp_cls = partial(
114
+ GLUMLP,
115
+ hidden_features=inner_dim,
116
+ activation=config.hidden_act,
117
+ use_flash_attn=config.use_flash_attn,
118
+ hidden_dropout_prob=config.hidden_dropout_prob,
119
+ return_residual=return_residual,
120
+ )
121
+ elif mlp_type == 'fused_mlp':
122
+ if FusedMLP is None:
123
+ raise ImportError("fused_dense is not installed")
124
+ mlp_checkpoint_lvl = getattr(config, "mlp_checkpoint_lvl", 0)
125
+ # mlp_checkpoint_lvl could be a list, which contains the checkpoint_lvl for each layer
126
+ if isinstance(mlp_checkpoint_lvl, Sequence):
127
+ assert layer_idx is not None
128
+ mlp_checkpoint_lvl = mlp_checkpoint_lvl[layer_idx]
129
+ mlp_cls = partial(
130
+ FusedMLP,
131
+ hidden_features=inner_dim,
132
+ checkpoint_lvl=mlp_checkpoint_lvl,
133
+ return_residual=return_residual,
134
+ )
135
+ else:
136
+ raise NotImplementedError
137
+ return mlp_cls
138
+
139
+
140
+ def create_block(config, layer_idx=None):
141
+ last_layer_subset = getattr(config, "last_layer_subset", False)
142
+ cross_attn = last_layer_subset and layer_idx == config.num_hidden_layers - 1
143
+ # TD [2022-12-19]: For cross attention (last layer), we actually want to return the
144
+ # residual x_kv, not residual x. But it's annoying to change the API (and it only affects
145
+ # one layer) so we just choose not to return residual in this case.
146
+ return_residual = not cross_attn
147
+ mixer_cls = create_mixer_cls(config, cross_attn, return_residual=return_residual)
148
+ mlp_cls = create_mlp_cls(config, layer_idx, return_residual=return_residual)
149
+ norm_cls = partial(nn.LayerNorm, eps=config.layer_norm_eps)
150
+ block = Block(
151
+ config.hidden_size,
152
+ mixer_cls,
153
+ mlp_cls,
154
+ norm_cls=norm_cls,
155
+ prenorm=False,
156
+ resid_dropout1=config.hidden_dropout_prob,
157
+ resid_dropout2=config.hidden_dropout_prob,
158
+ fused_dropout_add_ln=getattr(config, "fused_dropout_add_ln", False),
159
+ return_residual=return_residual,
160
+ )
161
+ return block
162
+
163
+
164
+ # https://github.com/huggingface/transformers/blob/7032e0203262ebb2ebf55da8d2e01f873973e835/src/transformers/models/bert/modeling_bert.py#L748
165
+ def _init_weights(module, initializer_range=0.02):
166
+ if isinstance(module, nn.Linear):
167
+ nn.init.normal_(module.weight, std=initializer_range)
168
+ if module.bias is not None:
169
+ nn.init.zeros_(module.bias)
170
+ elif isinstance(module, nn.Embedding):
171
+ nn.init.normal_(module.weight, std=initializer_range)
172
+ if module.padding_idx is not None:
173
+ nn.init.zeros_(module.weight[module.padding_idx])
174
+
175
+
176
+ class BertEncoder(nn.Module):
177
+ def __init__(self, config: JinaBertConfig):
178
+ super().__init__()
179
+ self.use_flash_attn = config.use_flash_attn if config.use_flash_attn is not None else torch.cuda.is_available()
180
+ self.layers = nn.ModuleList(
181
+ [create_block(config, layer_idx=i) for i in range(config.num_hidden_layers)]
182
+ )
183
+ self._grad_checkpointing = False
184
+
185
+ @property
186
+ def gradient_checkpointing(self):
187
+ return self._grad_checkpointing
188
+
189
+ @gradient_checkpointing.setter
190
+ def gradient_checkpointing(self, value):
191
+ self._grad_checkpointing = value
192
+
193
+ def forward(self, hidden_states, key_padding_mask=None, subset_mask=None):
194
+ """If subset_mask is not None, we only want output for the subset of the sequence.
195
+ This means that we only compute the last layer output for these tokens.
196
+ subset_mask: (batch, seqlen), dtype=torch.bool
197
+ """
198
+ if key_padding_mask is None or not self.use_flash_attn:
199
+ mixer_kwargs = (
200
+ {"key_padding_mask": key_padding_mask.bool()} if key_padding_mask is not None else None
201
+ )
202
+ for layer in self.layers:
203
+ if self._grad_checkpointing:
204
+ hidden_states = torch.utils.checkpoint.checkpoint(
205
+ layer,
206
+ hidden_states,
207
+ use_reentrant=False,
208
+ mixer_kwargs=mixer_kwargs
209
+ )
210
+ else:
211
+ hidden_states = layer(hidden_states, mixer_kwargs=mixer_kwargs)
212
+ if subset_mask is not None:
213
+ hidden_states = hidden_states[subset_mask]
214
+ else:
215
+ batch, seqlen = hidden_states.shape[:2]
216
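+             # Flash-attention path: strip padding tokens, run the layers on the packed sequence, then re-pad (or gather only the requested subset) at the end.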
+ hidden_states, indices, cu_seqlens, max_seqlen_in_batch = unpad_input(
217
+ hidden_states, key_padding_mask
218
+ )
219
+ mixer_kwargs = {"cu_seqlens": cu_seqlens, "max_seqlen": max_seqlen_in_batch}
220
+ if subset_mask is None:
221
+ for layer in self.layers:
222
+ if self._grad_checkpointing:
223
+ hidden_states = torch.utils.checkpoint.checkpoint(
224
+ layer,
225
+ hidden_states,
226
+ use_reentrant=False,
227
+ mixer_kwargs=mixer_kwargs
228
+ )
229
+ else:
230
+ hidden_states = layer(hidden_states, mixer_kwargs=mixer_kwargs)
231
+ hidden_states = pad_input(hidden_states, indices, batch, seqlen)
232
+ else:
233
+ for layer in self.layers[:-1]:
234
+ if self._grad_checkpointing:
235
+ hidden_states = torch.utils.checkpoint.checkpoint(
236
+ layer,
237
+ hidden_states,
238
+ use_reentrant=False,
239
+ mixer_kwargs=mixer_kwargs
240
+ )
241
+ else:
242
+ hidden_states = layer(hidden_states, mixer_kwargs=mixer_kwargs)
243
+ if key_padding_mask is not None:
244
+ subset_idx = torch.nonzero(
245
+ subset_mask[key_padding_mask], as_tuple=False
246
+ ).flatten()
247
+ subset_seqlens = (subset_mask & key_padding_mask).sum(dim=-1, dtype=torch.int32)
248
+ subset_cu_seqlens = F.pad(
249
+ torch.cumsum(subset_seqlens, dim=0, dtype=torch.torch.int32), (1, 0)
250
+ )
251
+ else:
252
+ subset_idx = torch.nonzero(subset_mask, as_tuple=False).flatten()
253
+ subset_seqlens = subset_mask.sum(dim=-1, dtype=torch.int32)
254
+ subset_cu_seqlens = F.pad(
255
+ torch.cumsum(subset_seqlens, dim=0, dtype=torch.torch.int32), (1, 0)
256
+ )
257
+ hidden_states_subset, hidden_states = index_first_axis_residual(
258
+ hidden_states, subset_idx
259
+ )
260
+ # It's ok to set max_seqlen_q to be much larger
261
+ mixer_kwargs = {
262
+ "x_kv": hidden_states,
263
+ "cu_seqlens": subset_cu_seqlens,
264
+ "max_seqlen": max_seqlen_in_batch,
265
+ "cu_seqlens_k": cu_seqlens,
266
+ "max_seqlen_k": max_seqlen_in_batch,
267
+ }
268
+ if self._grad_checkpointing:
269
+ torch.utils.checkpoint.checkpoint(
270
+ self.layers[-1],
271
+ hidden_states_subset,
272
+ use_reentrant=False,
273
+ mixer_kwargs=mixer_kwargs
274
+ )
275
+ else:
276
+ hidden_states = self.layers[-1](hidden_states_subset, mixer_kwargs=mixer_kwargs)
277
+ return hidden_states
278
+
279
+
280
+ class BertPooler(nn.Module):
281
+ def __init__(self, config):
282
+ super().__init__()
283
+ fused_bias_fc = getattr(config, "fused_bias_fc", False)
284
+ if fused_bias_fc and FusedDense is None:
285
+ raise ImportError("fused_dense is not installed")
286
+ linear_cls = nn.Linear if not fused_bias_fc else FusedDense
287
+ self.dense = linear_cls(config.hidden_size, config.hidden_size)
288
+ self.activation = nn.Tanh()
289
+
290
+ def forward(self, hidden_states, pool=True):
291
+ # We "pool" the model by simply taking the hidden state corresponding
292
+ # to the first token.
293
+ first_token_tensor = hidden_states[:, 0] if pool else hidden_states
294
+ pooled_output = self.dense(first_token_tensor)
295
+ pooled_output = self.activation(pooled_output)
296
+ return pooled_output
297
+
298
+
299
+ class BertPredictionHeadTransform(nn.Module):
300
+ def __init__(self, config):
301
+ super().__init__()
302
+ fused_bias_fc = getattr(config, "fused_bias_fc", False)
303
+ if fused_bias_fc and FusedDense is None:
304
+ raise ImportError("fused_dense is not installed")
305
+ self.fused_dropout_add_ln = getattr(config, "fused_dropout_add_ln", False)
306
+ if self.fused_dropout_add_ln and layer_norm_fn is None:
307
+ raise ImportError("Triton is not installed")
308
+ linear_cls = nn.Linear if not fused_bias_fc else FusedDense
309
+ self.dense = linear_cls(config.hidden_size, config.hidden_size)
310
+ approximate = (
311
+ "tanh"
312
+ if config.hidden_act in ["gelu_new", "gelu_fast", "gelu_pytorch_tanh"]
313
+ else "none"
314
+ )
315
+ self.transform_act_fn = nn.GELU(approximate=approximate)
316
+ self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
317
+
318
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
319
+ hidden_states = self.dense(hidden_states)
320
+ hidden_states = self.transform_act_fn(hidden_states)
321
+ if not self.fused_dropout_add_ln:
322
+ hidden_states = self.layer_norm(hidden_states)
323
+ else:
324
+ hidden_states = layer_norm_fn(
325
+ hidden_states, self.layer_norm.weight, self.layer_norm.bias, eps=self.layer_norm.eps
326
+ )
327
+ return hidden_states
328
+
329
+
330
+ class BertLMPredictionHead(nn.Module):
331
+ def __init__(self, config):
332
+ super().__init__()
333
+ fused_bias_fc = getattr(config, "fused_bias_fc", False)
334
+ if fused_bias_fc and FusedDense is None:
335
+ raise ImportError("fused_dense is not installed")
336
+ linear_cls = nn.Linear if not fused_bias_fc else FusedDense
337
+
338
+ self.transform = BertPredictionHeadTransform(config)
339
+
340
+ # The output weights are the same as the input embeddings, but there is
341
+ # an output-only bias for each token.
342
+ self.decoder = linear_cls(config.hidden_size, config.vocab_size, bias=True)
343
+
344
+ def forward(self, hidden_states):
345
+ hidden_states = self.transform(hidden_states)
346
+ hidden_states = self.decoder(hidden_states)
347
+ return hidden_states
348
+
349
+
350
+ class BertPreTrainingHeads(nn.Module):
351
+ def __init__(self, config):
352
+ super().__init__()
353
+ self.predictions = BertLMPredictionHead(config)
354
+ self.seq_relationship = nn.Linear(config.hidden_size, 2)
355
+
356
+ def forward(self, sequence_output, pooled_output):
357
+ prediction_scores = self.predictions(sequence_output)
358
+ seq_relationship_score = self.seq_relationship(pooled_output)
359
+ return prediction_scores, seq_relationship_score
360
+
361
+
362
+ class BertPreTrainedModel(PreTrainedModel):
363
+ """An abstract class to handle weights initialization and
364
+ a simple interface for dowloading and loading pretrained models.
365
+ """
366
+ config_class = JinaBertConfig
367
+ base_model_prefix = "bert"
368
+ supports_gradient_checkpointing = True
369
+
370
+ def _set_gradient_checkpointing(self, module, value=False):
371
+ if isinstance(module, BertEncoder):
372
+ module.gradient_checkpointing = value
373
+
374
+
375
+ class BertModel(BertPreTrainedModel):
376
+ def __init__(self, config: JinaBertConfig, add_pooling_layer=True):
377
+ super().__init__(config)
378
+ self.pad_vocab_size_multiple = getattr(config, "pad_vocab_size_multiple", 1)
379
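+         # Round the vocabulary size up to a multiple of pad_vocab_size_multiple (e.g. for more efficient embedding and LM-head matmuls).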
+ if config.vocab_size % self.pad_vocab_size_multiple != 0:
380
+ config.vocab_size += self.pad_vocab_size_multiple - (
381
+ config.vocab_size % self.pad_vocab_size_multiple
382
+ )
383
+ self.fused_dropout_add_ln = getattr(config, "fused_dropout_add_ln", False)
384
+ if self.fused_dropout_add_ln and layer_norm_fn is None:
385
+ raise ImportError("Triton is not installed")
386
+ assert config.hidden_act in ["gelu", "gelu_new", "gelu_fast", "gelu_pytorch_tanh"]
387
+
388
+ self.embeddings = BertEmbeddings(
389
+ config.hidden_size,
390
+ config.vocab_size,
391
+ -1, # No position embeddings
392
+ config.type_vocab_size,
393
+ padding_idx=config.pad_token_id,
394
+ )
395
+ self.emb_drop = nn.Dropout(config.hidden_dropout_prob)
396
+ self.emb_ln = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
397
+ self.encoder = BertEncoder(config)
398
+ self.pooler = BertPooler(config) if add_pooling_layer else None
399
+
400
+ self.emb_pooler = config.emb_pooler
401
+ self._name_or_path = config._name_or_path
402
+ if self.emb_pooler is not None:
403
+ from transformers import AutoTokenizer
404
+
405
+ self.tokenizer = AutoTokenizer.from_pretrained(config._name_or_path, trust_remote_code=True)
406
+ else:
407
+ self.tokenizer = None
408
+
409
+ self.apply(partial(_init_weights, initializer_range=config.initializer_range))
410
+
411
+ def forward(
412
+ self,
413
+ input_ids,
414
+ position_ids=None,
415
+ token_type_ids=None,
416
+ attention_mask=None,
417
+ masked_tokens_mask=None,
418
+ return_dict=True,
419
+ ):
420
+ """If masked_tokens_mask is not None (i.e. last_layer_subset == True in BertForPreTraining),
421
+ we only want the output for the masked tokens. This means that we only compute the last
422
+ layer output for these tokens.
423
+ masked_tokens_mask: (batch, seqlen), dtype=torch.bool
424
+ """
425
+ hidden_states = self.embeddings(
426
+ input_ids, position_ids=position_ids, token_type_ids=token_type_ids
427
+ )
428
+
429
+ # TD [2022-12:18]: Don't need to force residual in fp32
430
+ # BERT puts embedding LayerNorm before embedding dropout.
431
+ if not self.fused_dropout_add_ln:
432
+ hidden_states = self.emb_ln(hidden_states)
433
+ else:
434
+ hidden_states = layer_norm_fn(
435
+ hidden_states, self.emb_ln.weight, self.emb_ln.bias, eps=self.emb_ln.eps
436
+ )
437
+ hidden_states = self.emb_drop(hidden_states)
438
+
439
+ if masked_tokens_mask is not None:
440
+ batch_size, seqlen = input_ids.shape[:2]
441
+ # We also need the first column for the CLS token
442
+ first_col_mask = torch.zeros(
443
+ batch_size, seqlen, dtype=torch.bool, device=input_ids.device
444
+ )
445
+ first_col_mask[:, 0] = True
446
+ subset_mask = masked_tokens_mask | first_col_mask
447
+ else:
448
+ subset_mask = None
449
+
450
+ sequence_output = self.encoder(
451
+ hidden_states, key_padding_mask=attention_mask, subset_mask=subset_mask
452
+ )
453
+
454
+ if masked_tokens_mask is None:
455
+ pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
456
+ else:
457
+ # TD [2022-03-01]: the indexing here is very tricky.
458
+ if attention_mask is not None:
459
+ subset_idx = subset_mask[attention_mask]
460
+ pool_input = sequence_output[first_col_mask[attention_mask][subset_idx]]
461
+ sequence_output = sequence_output[masked_tokens_mask[attention_mask][subset_idx]]
462
+ else:
463
+ pool_input = sequence_output[first_col_mask[subset_mask]]
464
+ sequence_output = sequence_output[masked_tokens_mask[subset_mask]]
465
+ pooled_output = self.pooler(pool_input, pool=False) if self.pooler is not None else None
466
+
467
+ if not return_dict:
468
+ return (sequence_output, pooled_output)
469
+
470
+ return BaseModelOutputWithPoolingAndCrossAttentions(
471
+ last_hidden_state=sequence_output,
472
+ pooler_output=pooled_output,
473
+ )
474
+
475
+
476
+ @torch.inference_mode()
477
+ def encode(
478
+ self: 'BertModel',
479
+ sentences: Union[str, List[str]],
480
+ batch_size: int = 32,
481
+ show_progress_bar: Optional[bool] = None,
482
+ output_value: str = 'sentence_embedding',
483
+ convert_to_numpy: bool = True,
484
+ convert_to_tensor: bool = False,
485
+ device: Optional[torch.device] = None,
486
+ normalize_embeddings: bool = False,
487
+ **tokenizer_kwargs,
488
+ ) -> Union[List[torch.Tensor], np.ndarray, torch.Tensor]:
489
+ """
490
+ Computes sentence embeddings
491
+ Args:
492
+ sentences(`str` or `List[str]`):
493
+ Sentence or sentences to be encoded
494
+ batch_size(`int`, *optional*, defaults to 32):
495
+ Batch size for the computation
496
+ show_progress_bar(`bool`, *optional*, defaults to None):
497
+ Show a progress bar when encoding sentences.
498
+ If set to None, progress bar is only shown when `logger.level == logging.INFO` or `logger.level == logging.DEBUG`.
499
+ output_value(`str`, *optional*, defaults to 'sentence_embedding'):
500
+ Default sentence_embedding, to get sentence embeddings.
501
+ Can be set to token_embeddings to get wordpiece token embeddings.
502
+ Set to None, to get all output values
503
+ convert_to_numpy(`bool`, *optional*, defaults to True):
504
+ If true, the output is a list of numpy vectors.
505
+ Else, it is a list of pytorch tensors.
506
+ convert_to_tensor(`bool`, *optional*, defaults to False):
507
+ If true, you get one large tensor as return.
508
+ Overwrites any setting from convert_to_numpy
509
+ device(`torch.device`, *optional*, defaults to None):
510
+ Which torch.device to use for the computation
511
+ normalize_embeddings(`bool`, *optional*, defaults to False):
512
+ If set to true, returned vectors will have length 1. In that case, the faster dot-product (util.dot_score) instead of cosine similarity can be used.
513
+ tokenizer_kwargs(`Dict[str, Any]`, *optional*, defaults to {}):
514
+ Keyword arguments for the tokenizer
515
+ Returns:
516
+ By default, a list of tensors is returned.
517
+ If convert_to_tensor, a stacked tensor is returned.
518
+ If convert_to_numpy, a numpy matrix is returned.
519
+ """
520
+ if self.emb_pooler is None:
521
+ warnings.warn("No emb_pooler specified, defaulting to mean pooling.")
522
+ self.emb_pooler = 'mean'
523
+ from transformers import AutoTokenizer
524
+
525
+ self.tokenizer = AutoTokenizer.from_pretrained(self._name_or_path, trust_remote_code=True)
526
+ if self.emb_pooler != 'mean':
527
+ raise NotImplementedError
528
+
529
+ is_training = self.training
530
+ self.eval()
531
+
532
+ if show_progress_bar is None:
533
+ show_progress_bar = (
534
+ logger.getEffectiveLevel() == logging.INFO
535
+ or logger.getEffectiveLevel() == logging.DEBUG
536
+ )
537
+
538
+ if convert_to_tensor:
539
+ convert_to_numpy = False
540
+
541
+ if output_value != 'sentence_embedding':
542
+ convert_to_tensor = False
543
+ convert_to_numpy = False
544
+
545
+ input_was_string = False
546
+ if isinstance(sentences, str) or not hasattr(sentences, '__len__'):
547
+ sentences = [sentences]
548
+ input_was_string = True
549
+
550
+ if device is not None:
551
+ self.to(device)
552
+
553
+ # TODO: Maybe use better length heuristic?
554
+ permutation = np.argsort([-len(i) for i in sentences])
555
+ inverse_permutation = np.argsort(permutation)
556
+ sentences = [sentences[idx] for idx in permutation]
557
+
558
+ tokenizer_kwargs['padding'] = tokenizer_kwargs.get('padding', True)
559
+ tokenizer_kwargs['max_length'] = tokenizer_kwargs.get('max_length', 8192)
560
+ tokenizer_kwargs['truncation'] = tokenizer_kwargs.get('truncation', True)
561
+
562
+ all_embeddings = []
563
+
564
+ if trange is not None:
565
+ range_iter = trange(
566
+ 0,
567
+ len(sentences),
568
+ batch_size,
569
+ desc="Encoding",
570
+ disable=not show_progress_bar,
571
+ )
572
+ else:
573
+ range_iter = range(0, len(sentences), batch_size)
574
+
575
+ for i in range_iter:
576
+ encoded_input = self.tokenizer(
577
+ sentences[i : i + batch_size],
578
+ return_tensors='pt',
579
+ **tokenizer_kwargs,
580
+ ).to(self.device)
581
+ token_embs = self.forward(**encoded_input)[0]
582
+
583
+ # Accumulate in fp32 to avoid overflow
584
+ token_embs = token_embs.float()
585
+
586
+ if output_value == 'token_embeddings':
587
+ raise NotImplementedError
588
+ elif output_value is None:
589
+ raise NotImplementedError
590
+ else:
591
+ embeddings = self.mean_pooling(
592
+ token_embs, encoded_input['attention_mask']
593
+ )
594
+
595
+ if normalize_embeddings:
596
+ embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
597
+
598
+ if convert_to_numpy:
599
+ embeddings = embeddings.cpu()
600
+ all_embeddings.extend(embeddings)
601
+
602
+ all_embeddings = [all_embeddings[idx] for idx in inverse_permutation]
603
+
604
+ if convert_to_tensor:
605
+ all_embeddings = torch.stack(all_embeddings)
606
+ elif convert_to_numpy:
607
+ all_embeddings = np.asarray([emb.numpy() for emb in all_embeddings])
608
+
609
+ if input_was_string:
610
+ all_embeddings = all_embeddings[0]
611
+
612
+ self.train(is_training)
613
+ return all_embeddings
614
+
615
+ def mean_pooling(
616
+ self, token_embeddings: torch.Tensor, attention_mask: torch.Tensor
617
+ ):
618
+ input_mask_expanded = (
619
+ attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
620
+ )
621
+ return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
622
+ input_mask_expanded.sum(1), min=1e-9
623
+ )
624
+
625
+ class BertForPreTraining(BertPreTrainedModel):
626
+ def __init__(self, config: JinaBertConfig):
627
+ super().__init__(config)
628
+ # If dense_seq_output, we only need to pass the hidden states for the masked out tokens
629
+ # (around 15%) to the classifier heads.
630
+ self.dense_seq_output = getattr(config, "dense_seq_output", False)
631
+ # If last_layer_subset, we only need to compute the last layer for a subset of tokens
632
+ # (e.g., the tokens we need to compute the masked LM loss and the next-sentence prediction).
633
+ self.last_layer_subset = getattr(config, "last_layer_subset", False)
634
+ if self.last_layer_subset:
635
+ assert self.dense_seq_output, "last_layer_subset requires dense_seq_output"
636
+ use_xentropy = getattr(config, "use_xentropy", False)
637
+ if use_xentropy and CrossEntropyLoss is None:
638
+ raise ImportError("xentropy_cuda is not installed")
639
+ loss_cls = (
640
+ nn.CrossEntropyLoss
641
+ if not use_xentropy
642
+ else partial(CrossEntropyLoss, inplace_backward=True)
643
+ )
644
+
645
+ self.bert = BertModel(config)
646
+ self.cls = BertPreTrainingHeads(config)
647
+ self.mlm_loss = loss_cls(ignore_index=0)
648
+ self.nsp_loss = loss_cls(ignore_index=-1)
649
+
650
+ # Initialize weights and apply final processing
651
+ self.apply(partial(_init_weights, initializer_range=config.initializer_range))
652
+ self.tie_weights()
653
+
654
+ def tie_weights(self):
655
+ self.cls.predictions.decoder.weight = self.bert.embeddings.word_embeddings.weight
656
+
657
+ def get_input_embeddings(self):
658
+ return self.bert.embeddings.word_embeddings
659
+
660
+ def forward(
661
+ self,
662
+ input_ids,
663
+ position_ids=None,
664
+ token_type_ids=None,
665
+ attention_mask=None,
666
+ labels=None,
667
+ next_sentence_label=None,
668
+ ):
669
+ """
670
+ If labels are provided, they must be 0 for masked out tokens (as specified in the attention
671
+ mask).
672
+ Outputs:
673
+ if `labels` and `next_sentence_label` are not `None`:
674
+ Outputs the total_loss which is the sum of the masked language modeling loss and the next
675
+ sentence classification loss.
676
+ if `labels` or `next_sentence_label` is `None`:
677
+ Outputs a tuple comprising
678
+ - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and
679
+ - the next sentence classification logits of shape [batch_size, 2].
680
+
681
+ """
682
+ masked_tokens_mask = labels > 0 if (self.last_layer_subset and labels is not None) else None
683
+ outputs = self.bert(
684
+ input_ids,
685
+ position_ids=position_ids,
686
+ token_type_ids=token_type_ids,
687
+ attention_mask=attention_mask.bool() if attention_mask is not None else None,
688
+ masked_tokens_mask=masked_tokens_mask,
689
+ )
690
+ sequence_output, pooled_output = outputs.last_hidden_state, outputs.pooler_output
691
+ if self.dense_seq_output and labels is not None:
692
+ masked_token_idx = torch.nonzero(labels.flatten() > 0, as_tuple=False).flatten()
693
+ if not self.last_layer_subset:
694
+ sequence_output = index_first_axis(
695
+ rearrange(sequence_output, "b s d -> (b s) d"), masked_token_idx
696
+ )
697
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
698
+
699
+ if (
700
+ self.dense_seq_output and labels is not None
701
+ ): # prediction_scores are already flattened
702
+ masked_lm_loss = self.mlm_loss(
703
+ prediction_scores, labels.flatten()[masked_token_idx]
704
+ ).float()
705
+ elif labels is not None:
706
+ masked_lm_loss = self.mlm_loss(
707
+ rearrange(prediction_scores, "... v -> (...) v"),
708
+ rearrange(labels, "... -> (...)"),
709
+ ).float()
710
+ else:
711
+ masked_lm_loss = 0
712
+ if next_sentence_label is not None:
713
+ next_sentence_loss = self.nsp_loss(
714
+ rearrange(seq_relationship_score, "... t -> (...) t"),
715
+ rearrange(next_sentence_label, "... -> (...)"),
716
+ ).float()
717
+ else:
718
+ next_sentence_loss = 0
719
+
720
+ total_loss = masked_lm_loss + next_sentence_loss
721
+
722
+ return BertForPreTrainingOutput(
723
+ loss=total_loss,
724
+ prediction_logits=prediction_scores,
725
+ seq_relationship_logits=seq_relationship_score,
726
+ )
727
+
728
+
729
+ class BertForMaskedLM(BertPreTrainedModel):
730
+ def __init__(self, config: JinaBertConfig):
731
+ super().__init__(config)
732
+ # If dense_seq_output, we only need to pass the hidden states for the masked out tokens
733
+ # (around 15%) to the classifier heads.
734
+ self.dense_seq_output = getattr(config, "dense_seq_output", False)
735
+ # If last_layer_subset, we only need to compute the last layer for a subset of tokens
736
+ # (e.g., the tokens we need to compute the masked LM loss and the next-sentence prediction).
737
+ self.last_layer_subset = getattr(config, "last_layer_subset", False)
738
+ if self.last_layer_subset:
739
+ assert self.dense_seq_output, "last_layer_subset requires dense_seq_output"
740
+ use_xentropy = getattr(config, "use_xentropy", False)
741
+ if use_xentropy and CrossEntropyLoss is None:
742
+ raise ImportError("xentropy_cuda is not installed")
743
+ loss_cls = (
744
+ nn.CrossEntropyLoss
745
+ if not use_xentropy
746
+ else partial(CrossEntropyLoss, inplace_backward=True)
747
+ )
748
+
749
+ self.bert = BertModel(config)
750
+ self.cls = BertPreTrainingHeads(config)
751
+ self.mlm_loss = loss_cls(ignore_index=0)
752
+
753
+ # Initialize weights and apply final processing
754
+ self.apply(partial(_init_weights, initializer_range=config.initializer_range))
755
+ self.tie_weights()
756
+
757
+ def tie_weights(self):
758
+ self.cls.predictions.decoder.weight = self.bert.embeddings.word_embeddings.weight
759
+
760
+ def get_input_embeddings(self):
761
+ return self.bert.embeddings.word_embeddings
762
+
763
+ def forward(
764
+ self,
765
+ input_ids,
766
+ position_ids=None,
767
+ token_type_ids=None,
768
+ attention_mask=None,
769
+ labels=None
770
+ ):
771
+ masked_tokens_mask = labels > 0 if (self.last_layer_subset and labels is not None) else None
772
+ outputs = self.bert(
773
+ input_ids,
774
+ position_ids=position_ids,
775
+ token_type_ids=token_type_ids,
776
+ attention_mask=attention_mask.bool() if attention_mask is not None else None,
777
+ masked_tokens_mask=masked_tokens_mask,
778
+ )
779
+ sequence_output, pooled_output = outputs.last_hidden_state, outputs.pooler_output
780
+ if self.dense_seq_output and labels is not None:
781
+ masked_token_idx = torch.nonzero(labels.flatten() > 0, as_tuple=False).flatten()
782
+ if not self.last_layer_subset:
783
+ sequence_output = index_first_axis(
784
+ rearrange(sequence_output, "b s d -> (b s) d"), masked_token_idx
785
+ )
786
+ prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
787
+
788
+ if (
789
+ self.dense_seq_output and labels is not None
790
+ ): # prediction_scores are already flattened
791
+ masked_lm_loss = self.mlm_loss(
792
+ prediction_scores, labels.flatten()[masked_token_idx]
793
+ ).float()
794
+ elif labels is not None:
795
+ masked_lm_loss = self.mlm_loss(
796
+ rearrange(prediction_scores, "... v -> (...) v"),
797
+ rearrange(labels, "... -> (...)"),
798
+ ).float()
799
+ else:
800
+ raise ValueError('MLM labels must not be None')
801
+
802
+ return BertForPreTrainingOutput(
803
+ loss=masked_lm_loss,
804
+ prediction_logits=prediction_scores,
805
+ seq_relationship_logits=seq_relationship_score,
806
+ )
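A minimal usage sketch for the `encode` path defined above, assuming the checkpoint is loaded with `trust_remote_code=True` so that this `BertModel` implementation (and its mean pooling) is used; the repository id below is a placeholder, not something defined in this commit:

```python
# Hedged sketch: embed a few sentences with BertModel.encode().
# The repo id is a placeholder/assumption; substitute your own checkpoint.
from transformers import AutoModel

model = AutoModel.from_pretrained(
    "jinaai/jina-embeddings-v2-base-en",  # assumed checkpoint id
    trust_remote_code=True,
)
embeddings = model.encode(
    ["A sample sentence.", "Another, much longer sentence to embed."],
    batch_size=2,
    convert_to_numpy=True,
    normalize_embeddings=True,  # unit-length vectors, so dot product == cosine similarity
)
print(embeddings.shape)  # (2, hidden_size)
```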
modeling_for_glue.py ADDED
@@ -0,0 +1,264 @@
1
+ from typing import Optional, Union, Tuple
2
+
3
+ import torch
4
+ from torch import nn
5
+ from torch.nn import CrossEntropyLoss, MSELoss, BCEWithLogitsLoss
6
+ from transformers.modeling_outputs import SequenceClassifierOutput, QuestionAnsweringModelOutput, TokenClassifierOutput
7
+
8
+ from .modeling_bert import BertPreTrainedModel, BertModel
9
+ from .configuration_bert import JinaBertConfig
10
+
11
+
12
+ class BertForSequenceClassification(BertPreTrainedModel):
13
+ def __init__(self, config: JinaBertConfig):
14
+ super().__init__(config)
15
+ self.num_labels = config.num_labels
16
+ self.config = config
17
+
18
+ self.bert = BertModel(config)
19
+ classifier_dropout = (
20
+ config.classifier_dropout
21
+ if config.classifier_dropout is not None
22
+ else config.hidden_dropout_prob
23
+ )
24
+ self.dropout = nn.Dropout(classifier_dropout)
25
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
26
+
27
+ # Initialize weights and apply final processing
28
+ self.post_init()
29
+
30
+
31
+ def forward(
32
+ self,
33
+ input_ids: Optional[torch.Tensor] = None,
34
+ attention_mask: Optional[torch.Tensor] = None,
35
+ token_type_ids: Optional[torch.Tensor] = None,
36
+ position_ids: Optional[torch.Tensor] = None,
37
+ head_mask: Optional[torch.Tensor] = None,
38
+ inputs_embeds: Optional[torch.Tensor] = None,
39
+ labels: Optional[torch.Tensor] = None,
40
+ output_attentions: Optional[bool] = None,
41
+ output_hidden_states: Optional[bool] = None,
42
+ return_dict: Optional[bool] = None,
43
+ ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
44
+ r"""
45
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
46
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
47
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
48
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
49
+ """
50
+ return_dict = (
51
+ return_dict if return_dict is not None else self.config.use_return_dict
52
+ )
53
+
54
+ assert head_mask is None
55
+ assert inputs_embeds is None
56
+ assert output_attentions is None
57
+ assert output_hidden_states is None
58
+ assert return_dict
59
+ outputs = self.bert(
60
+ input_ids,
61
+ attention_mask=attention_mask,
62
+ token_type_ids=token_type_ids,
63
+ position_ids=position_ids,
64
+ )
65
+
66
+ pooled_output = outputs[1]
67
+
68
+ pooled_output = self.dropout(pooled_output)
69
+ logits = self.classifier(pooled_output)
70
+
71
+ loss = None
72
+ if labels is not None:
73
+ if self.config.problem_type is None:
74
+ if self.num_labels == 1:
75
+ self.config.problem_type = "regression"
76
+ elif self.num_labels > 1 and (
77
+ labels.dtype == torch.long or labels.dtype == torch.int
78
+ ):
79
+ self.config.problem_type = "single_label_classification"
80
+ else:
81
+ self.config.problem_type = "multi_label_classification"
82
+
83
+ if self.config.problem_type == "regression":
84
+ loss_fct = MSELoss()
85
+ if self.num_labels == 1:
86
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
87
+ else:
88
+ loss = loss_fct(logits, labels)
89
+ elif self.config.problem_type == "single_label_classification":
90
+ loss_fct = CrossEntropyLoss()
91
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
92
+ elif self.config.problem_type == "multi_label_classification":
93
+ loss_fct = BCEWithLogitsLoss()
94
+ loss = loss_fct(logits, labels)
95
+ if not return_dict:
96
+ output = (logits,) + outputs[2:]
97
+ return ((loss,) + output) if loss is not None else output
98
+
99
+ return SequenceClassifierOutput(
100
+ loss=loss,
101
+ logits=logits,
102
+ hidden_states=outputs.hidden_states,
103
+ attentions=outputs.attentions,
104
+ )
105
+
106
+
107
+ class BertForQuestionAnswering(BertPreTrainedModel):
108
+ def __init__(self, config: JinaBertConfig):
109
+ super().__init__(config)
110
+ self.num_labels = config.num_labels
111
+
112
+ self.bert = BertModel(config, add_pooling_layer=False)
113
+ self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
114
+
115
+ # Initialize weights and apply final processing
116
+ self.post_init()
117
+
118
+ def forward(
119
+ self,
120
+ input_ids: Optional[torch.Tensor] = None,
121
+ attention_mask: Optional[torch.Tensor] = None,
122
+ token_type_ids: Optional[torch.Tensor] = None,
123
+ position_ids: Optional[torch.Tensor] = None,
124
+ head_mask: Optional[torch.Tensor] = None,
125
+ inputs_embeds: Optional[torch.Tensor] = None,
126
+ start_positions: Optional[torch.Tensor] = None,
127
+ end_positions: Optional[torch.Tensor] = None,
128
+ output_attentions: Optional[bool] = None,
129
+ output_hidden_states: Optional[bool] = None,
130
+ return_dict: Optional[bool] = None,
131
+ ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
132
+ r"""
133
+ start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
134
+ Labels for position (index) of the start of the labelled span for computing the token classification loss.
135
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
136
+ are not taken into account for computing the loss.
137
+ end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
138
+ Labels for position (index) of the end of the labelled span for computing the token classification loss.
139
+ Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
140
+ are not taken into account for computing the loss.
141
+ """
142
+ return_dict = (
143
+ return_dict if return_dict is not None else self.config.use_return_dict
144
+ )
145
+
146
+ assert head_mask is None
147
+ assert inputs_embeds is None
148
+ assert output_attentions is None
149
+ assert output_hidden_states is None
150
+ assert return_dict
151
+ outputs = self.bert(
152
+ input_ids,
153
+ attention_mask=attention_mask,
154
+ token_type_ids=token_type_ids,
155
+ position_ids=position_ids,
156
+ )
157
+
158
+ sequence_output = outputs[0]
159
+
160
+ logits = self.qa_outputs(sequence_output)
161
+ start_logits, end_logits = logits.split(1, dim=-1)
162
+ start_logits = start_logits.squeeze(-1).contiguous()
163
+ end_logits = end_logits.squeeze(-1).contiguous()
164
+
165
+ total_loss = None
166
+ if start_positions is not None and end_positions is not None:
167
+ # If we are on multi-GPU, split add a dimension
168
+ if len(start_positions.size()) > 1:
169
+ start_positions = start_positions.squeeze(-1)
170
+ if len(end_positions.size()) > 1:
171
+ end_positions = end_positions.squeeze(-1)
172
+ # sometimes the start/end positions are outside our model inputs, we ignore these terms
173
+ ignored_index = start_logits.size(1)
174
+ start_positions = start_positions.clamp(0, ignored_index)
175
+ end_positions = end_positions.clamp(0, ignored_index)
176
+
177
+ loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
178
+ start_loss = loss_fct(start_logits, start_positions)
179
+ end_loss = loss_fct(end_logits, end_positions)
180
+ total_loss = (start_loss + end_loss) / 2
181
+
182
+ if not return_dict:
183
+ output = (start_logits, end_logits) + outputs[2:]
184
+ return ((total_loss,) + output) if total_loss is not None else output
185
+
186
+ return QuestionAnsweringModelOutput(
187
+ loss=total_loss,
188
+ start_logits=start_logits,
189
+ end_logits=end_logits,
190
+ hidden_states=outputs.hidden_states,
191
+ attentions=outputs.attentions,
192
+ )
193
+
194
+
195
+ class BertForTokenClassification(BertPreTrainedModel):
196
+ def __init__(self, config: JinaBertConfig):
197
+ super().__init__(config)
198
+ self.num_labels = config.num_labels
199
+
200
+ self.bert = BertModel(config, add_pooling_layer=False)
201
+ classifier_dropout = (
202
+ config.classifier_dropout
203
+ if config.classifier_dropout is not None
204
+ else config.hidden_dropout_prob
205
+ )
206
+ self.dropout = nn.Dropout(classifier_dropout)
207
+ self.classifier = nn.Linear(config.hidden_size, config.num_labels)
208
+
209
+ # Initialize weights and apply final processing
210
+ self.post_init()
211
+
212
+ def forward(
213
+ self,
214
+ input_ids: Optional[torch.Tensor] = None,
215
+ attention_mask: Optional[torch.Tensor] = None,
216
+ token_type_ids: Optional[torch.Tensor] = None,
217
+ position_ids: Optional[torch.Tensor] = None,
218
+ head_mask: Optional[torch.Tensor] = None,
219
+ inputs_embeds: Optional[torch.Tensor] = None,
220
+ labels: Optional[torch.Tensor] = None,
221
+ output_attentions: Optional[bool] = None,
222
+ output_hidden_states: Optional[bool] = None,
223
+ return_dict: Optional[bool] = None,
224
+ ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
225
+ r"""
226
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
227
+ Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
228
+ """
229
+ return_dict = (
230
+ return_dict if return_dict is not None else self.config.use_return_dict
231
+ )
232
+
233
+ assert head_mask is None
234
+ assert inputs_embeds is None
235
+ assert output_attentions is None
236
+ assert output_hidden_states is None
237
+ assert return_dict
238
+ outputs = self.bert(
239
+ input_ids,
240
+ attention_mask=attention_mask,
241
+ token_type_ids=token_type_ids,
242
+ position_ids=position_ids,
243
+ )
244
+
245
+ sequence_output = outputs[0]
246
+
247
+ sequence_output = self.dropout(sequence_output)
248
+ logits = self.classifier(sequence_output)
249
+
250
+ loss = None
251
+ if labels is not None:
252
+ loss_fct = CrossEntropyLoss()
253
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
254
+
255
+ if not return_dict:
256
+ output = (logits,) + outputs[2:]
257
+ return ((loss,) + output) if loss is not None else output
258
+
259
+ return TokenClassifierOutput(
260
+ loss=loss,
261
+ logits=logits,
262
+ hidden_states=outputs.hidden_states,
263
+ attentions=outputs.attentions,
264
+ )
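The GLUE heads above follow the standard Hugging Face pattern (pooled or token-level output, dropout, then a linear classifier). Below is a hedged sketch of one fine-tuning step with the sequence-classification head, assuming the repository's `config.json` maps `AutoModelForSequenceClassification` to the `BertForSequenceClassification` class above; the repo id is again a placeholder:

```python
# Hedged sketch: one forward/backward pass through BertForSequenceClassification.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo = "jinaai/jina-embeddings-v2-base-en"  # placeholder checkpoint id
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(
    repo, trust_remote_code=True, num_labels=2
)

batch = tokenizer(["great movie", "terrible movie"], padding=True, return_tensors="pt")
labels = torch.tensor([1, 0])
out = model(**batch, labels=labels, return_dict=True)  # the forward asserts return_dict
out.loss.backward()  # an optimizer.step() would follow in a real training loop
```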
modeling_lora.py ADDED
@@ -0,0 +1,336 @@
1
+ import math
2
+ import os
3
+ from functools import partial
4
+ from typing import Iterator, Optional, Tuple, Union
5
+
6
+ import torch
7
+ import torch.nn.utils.parametrize as parametrize
8
+ from torch import nn
9
+ from torch.nn import Parameter
10
+ from transformers import PretrainedConfig
11
+
12
+ from .modeling_bert import BertModel, BertPreTrainedModel, JinaBertConfig
13
+
14
+
15
+ def initialized_weights(
16
+ shape: Tuple[int], num_adaptions: int, init: str = "kaiming"
17
+ ) -> torch.Tensor:
18
+ weight_data = []
19
+ for _ in range(num_adaptions):
20
+ new_adaption = torch.zeros(shape)
21
+ if init == "kaiming":
22
+ nn.init.kaiming_uniform_(new_adaption, a=math.sqrt(5))
23
+ elif init == "normal":
24
+ nn.init.normal_(new_adaption)
25
+ else:
26
+ raise NotImplementedError
27
+ weight_data.append(new_adaption)
28
+ return torch.stack(weight_data, dim=0)
29
+
30
+
31
+ class LoRAParametrization(nn.Module):
32
+ """
33
+ This LoRA implementation was inspired by https://github.com/cccntu/minLoRA
34
+
35
+ The MIT License (MIT) Copyright (c) 2020 Andrej Karpathy
36
+
37
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software
38
+ and associated documentation files (the "Software"), to deal in the Software without restriction,
39
+ including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
40
+ and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
41
+ subject to the following conditions:
42
+
43
+ The above copyright notice and this permission notice shall be included in all copies or substantial
44
+ portions of the Software.
45
+
46
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
47
+ LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
48
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
49
+ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
50
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
51
+ """
52
+ def __init__(
53
+ self,
54
+ fan_in: int,
55
+ fan_out: int,
56
+ layer_type: str = "linear",
57
+ num_adaptions: int = 1,
58
+ rank: int = 4,
59
+ lora_dropout_p: float = 0.0,
60
+ lora_alpha: float = 1,
61
+ ):
62
+ super().__init__()
63
+ # if weight is stored as (fan_out, fan_in), the memory layout of A & B follows (W + BA)x
64
+ # otherwise, it's x(W + AB). This allows us to tie the weights between linear layers and embeddings
65
+ fan_in_fan_out = layer_type == "embedding"
66
+ self.swap = (lambda x: (x[1], x[0])) if fan_in_fan_out else (lambda x: x)
67
+
68
+ # For the officially "correct" LoRA initialization, check here: https://github.com/microsoft/LoRA
69
+ # TODO: Ensure that the initialization here is correct
70
+ if layer_type == "linear":
71
+ self.lora_A = nn.Parameter(
72
+ initialized_weights((rank, fan_in), num_adaptions, init="kaiming")
73
+ )
74
+ self.lora_B = nn.Parameter(torch.zeros((num_adaptions, fan_out, rank)))
75
+ elif layer_type == "embedding":
76
+ self.lora_A = nn.Parameter(torch.zeros((num_adaptions, fan_in, rank)))
77
+ self.lora_B = nn.Parameter(
78
+ initialized_weights(
79
+ (rank, fan_out), num_adaptions=num_adaptions, init="normal"
80
+ )
81
+ )
82
+ else:
83
+ raise NotImplementedError
84
+
85
+ self.lora_alpha, self.rank = lora_alpha, rank
86
+ self.scaling = lora_alpha / rank
87
+ self.lora_dropout = (
88
+ nn.Dropout(p=lora_dropout_p) if lora_dropout_p > 0 else lambda x: x
89
+ )
90
+ self.dropout_fn = self._dropout if lora_dropout_p > 0 else lambda x: x
91
+ self.register_buffer(
92
+ "lora_dropout_mask",
93
+ torch.ones(self.swap((1, fan_in)), dtype=self.lora_A.dtype),
94
+ persistent=False,
95
+ )
96
+ self.forward_fn = lambda x: x
97
+ self.current_task = None
98
+
99
+ def _dropout(self, A):
100
+ # to mimic the original implementation: A @ dropout(x), we do (A * dropout(ones)) @ x
101
+ return A * self.lora_dropout(self.lora_dropout_mask)
102
+
103
+ def lora_forward(self, X):
104
+ assert self.current_task is not None
105
+ return (
106
+ X
107
+ + torch.matmul(
108
+ *self.swap(
109
+ (
110
+ self.lora_B[self.current_task],
111
+ self.dropout_fn(self.lora_A[self.current_task]),
112
+ )
113
+ )
114
+ ).view(X.shape)
115
+ * self.scaling
116
+ )
117
+
118
+ def forward(self, X):
119
+ return self.forward_fn(X)
120
+
121
+ @property
122
+ def current_task(self):
123
+ return self._current_task
124
+
125
+ @current_task.setter
126
+ def current_task(self, task: Union[None, int]):
127
+ self._current_task = task
128
+ if task is None:
129
+ self.forward_fn = lambda x: x
130
+ else:
131
+ self.forward_fn = self.lora_forward
132
+
133
+ @classmethod
134
+ def from_linear(
135
+ cls,
136
+ layer: nn.Module,
137
+ num_adaptions: int = 1,
138
+ rank: int = 4,
139
+ lora_dropout_p: float = 0.0,
140
+ lora_alpha: int = 1,
141
+ ):
142
+ assert isinstance(layer, nn.Linear)
143
+ fan_out, fan_in = layer.weight.shape
144
+ return cls(
145
+ fan_in,
146
+ fan_out,
147
+ num_adaptions=num_adaptions,
148
+ layer_type="linear",
149
+ rank=rank,
150
+ lora_dropout_p=lora_dropout_p,
151
+ lora_alpha=lora_alpha,
152
+ )
153
+
154
+ @classmethod
155
+ def from_embedding(
156
+ cls, layer, num_adaptions=1, rank=4, lora_dropout_p=0.0, lora_alpha=1
157
+ ):
158
+ assert isinstance(layer, nn.Embedding)
159
+ fan_in, fan_out = layer.weight.shape
160
+ return cls(
161
+ fan_in,
162
+ fan_out,
163
+ num_adaptions=num_adaptions,
164
+ layer_type="embedding",
165
+ rank=rank,
166
+ lora_dropout_p=lora_dropout_p,
167
+ lora_alpha=lora_alpha,
168
+ )
169
+
170
+ @classmethod
171
+ def add_to_layer(
172
+ cls, layer, num_adaptions=1, rank=4, lora_dropout_p=0.0, lora_alpha=1
173
+ ):
174
+ if isinstance(layer, nn.Linear):
175
+ parametrize.register_parametrization(
176
+ layer,
177
+ "weight",
178
+ cls.from_linear(
179
+ layer,
180
+ num_adaptions=num_adaptions,
181
+ rank=rank,
182
+ lora_dropout_p=lora_dropout_p,
183
+ lora_alpha=lora_alpha,
184
+ ),
185
+ )
186
+ elif isinstance(layer, nn.Embedding):
187
+ parametrize.register_parametrization(
188
+ layer,
189
+ "weight",
190
+ cls.from_embedding(
191
+ layer,
192
+ num_adaptions=num_adaptions,
193
+ rank=rank,
194
+ lora_dropout_p=lora_dropout_p,
195
+ lora_alpha=lora_alpha,
196
+ ),
197
+ )
198
+
199
+ @staticmethod
200
+ def select_task_for_layer(layer: nn.Module, task_idx: Optional[int] = None):
201
+ if isinstance(layer, LoRAParametrization):
202
+ layer.current_task = task_idx
203
+
204
+ @staticmethod
205
+ def merge_lora_into_layer(layer: nn.Module):
206
+ if hasattr(layer, "parametrizations"):
207
+ for attr_name in layer.parametrizations.keys():
208
+ parametrize.remove_parametrizations(layer, attr_name, leave_parametrized=True)
209
+
210
+
211
+ class BertLoRA(BertPreTrainedModel):
212
+ def __init__(self, config: JinaBertConfig, bert: Optional[BertModel] = None, add_pooling_layer=True):
213
+ super().__init__(config)
214
+ if bert is None:
215
+ self.bert = BertModel(config, add_pooling_layer=add_pooling_layer)
216
+ else:
217
+ self.bert = bert
218
+ self._is_merged = False
219
+ self._num_adaptions = config.num_loras
220
+ self._register_lora(self._num_adaptions)
221
+ self.main_params_trainable = False
222
+ self._task_idx = None
223
+ # By default, we select the first LoRA
224
+ self.current_task = 0
225
+
226
+ @property
227
+ def main_params_trainable(self):
228
+ return self._main_params_trainable
229
+
230
+ @main_params_trainable.setter
231
+ def main_params_trainable(self, val: bool):
232
+ """Whether the main parameters (i.e. those that are not LoRA) should be trainable.
233
+
234
+ This method sets the `requires_grad_` attribute of the main weights
235
+ and controls which parameters are returned in `self.parameters()`.
236
+
237
+ :param val: Whether or not to make the parameters trainable.
238
+ :return: None
239
+ """
240
+ self._main_params_trainable = val
241
+ for name, param in super().named_parameters():
242
+ if "lora" not in name:
243
+ param.requires_grad_(val)
244
+
245
+ @classmethod
246
+ def from_bert(cls, *args, **kwargs):
247
+ bert = BertModel.from_pretrained(*args, **kwargs)
248
+ config = JinaBertConfig.from_pretrained(*args, **kwargs)
249
+ return cls(config, bert=bert)
250
+
251
+ def merge_lora(self):
252
+ """Merges currently selected LoRA into main weights."""
253
+ if self._is_merged:
254
+ raise Exception('LoRA has already been merged, cannot merge again')
255
+ self._is_merged = True
256
+ self.apply(LoRAParametrization.merge_lora_into_layer)
257
+
258
+ @classmethod
259
+ def from_pretrained(
260
+ cls,
261
+ pretrained_model_name_or_path: Optional[Union[str, os.PathLike]],
262
+ *model_args,
263
+ config: Optional[Union[PretrainedConfig, str, os.PathLike]] = None,
264
+ cache_dir: Optional[Union[str, os.PathLike]] = None,
265
+ ignore_mismatched_sizes: bool = False,
266
+ force_download: bool = False,
267
+ local_files_only: bool = False,
268
+ token: Optional[Union[str, bool]] = None,
269
+ revision: str = "main",
270
+ use_safetensors: bool = None,
271
+ **kwargs,
272
+ ):
273
+ """
274
+ TODO: choose between from_bert and super().from_pretrained
275
+
276
+ We want to be able to load both a pretrained BertModel, and a trained
277
+ BertLoRA via this method. To this end, we need to check which of these
278
+ models we are expected to load.
279
+ """
280
+ return cls.from_bert(pretrained_model_name_or_path)
281
+
282
+ def _register_lora(self, num_adaptions=1, rank=4, lora_dropout_p=0.0, lora_alpha=1):
283
+ self.apply(
284
+ partial(
285
+ LoRAParametrization.add_to_layer,
286
+ num_adaptions=num_adaptions,
287
+ rank=rank,
288
+ lora_dropout_p=lora_dropout_p,
289
+ lora_alpha=lora_alpha,
290
+ )
291
+ )
292
+
293
+ @property
294
+ def current_task(self):
295
+ """ Which LoRA is currently selected
296
+ :return: Integer or None (when LoRA is disabled)
297
+ """
298
+ return self._task_idx
299
+
300
+ @current_task.setter
301
+ def current_task(self, task_idx: Union[None, int]):
302
+ """Set the LoRA that is to be used.
303
+
304
+ The LoRA is specified by `task_idx`, which may be an integer >= 0,
305
+ indexing the available LoRAs. If it is None, no LoRA is used.
306
+
307
+ :param task_idx: Which LoRA to use
308
+ :return:
309
+ """
310
+ if self._is_merged:
311
+ raise Exception('LoRA has been merged, cannot select new task')
312
+ assert task_idx is None or 0 <= task_idx < self._num_adaptions
313
+ if self._task_idx != task_idx:
314
+ # In this case, we need to update the LoRAs everywhere
315
+ self._task_idx = task_idx
316
+ self.apply(
317
+ partial(LoRAParametrization.select_task_for_layer, task_idx=task_idx)
318
+ )
319
+
320
+ def forward(self, *args, current_task: Union[None, int] = -1, **kwargs):
321
+ if current_task is None or current_task >= 0:
322
+ self.current_task = current_task
323
+ return self.bert(*args, **kwargs)
324
+
325
+ def parameters(self, recurse: bool = True) -> Iterator[Parameter]:
326
+ for _, param in self.named_parameters(recurse=recurse):
327
+ yield param
328
+
329
+ def named_parameters(
330
+ self, prefix: str = "", recurse: bool = True, remove_duplicate: bool = True
331
+ ) -> Iterator[Tuple[str, Parameter]]:
332
+ for name, param in super().named_parameters(
333
+ prefix=prefix, recurse=recurse, remove_duplicate=remove_duplicate
334
+ ):
335
+ if "lora" in name or self.main_params_trainable:
336
+ yield name, param
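`LoRAParametrization` relies on `torch.nn.utils.parametrize`: the low-rank update `W + scaling * B @ A` is applied to the weight on the fly, `current_task` selects which adapter is added, and `merge_lora()` simply removes the parametrizations while keeping the parametrized values. The snippet below is a self-contained illustration of that mechanism with a single adapter on a plain `nn.Linear`; `TinyLoRA` is made up for the example and is not the class above:

```python
import math
import torch
from torch import nn
import torch.nn.utils.parametrize as parametrize

class TinyLoRA(nn.Module):
    """Single-adapter analogue of the linear branch of LoRAParametrization."""
    def __init__(self, fan_in: int, fan_out: int, rank: int = 4, alpha: float = 1.0):
        super().__init__()
        self.lora_A = nn.Parameter(torch.empty(rank, fan_in))
        nn.init.kaiming_uniform_(self.lora_A, a=math.sqrt(5))
        self.lora_B = nn.Parameter(torch.zeros(fan_out, rank))  # zero init: starts as a no-op
        self.scaling = alpha / rank

    def forward(self, W: torch.Tensor) -> torch.Tensor:
        # W has shape (fan_out, fan_in); add the low-rank update scaling * B @ A
        return W + self.scaling * (self.lora_B @ self.lora_A)

layer = nn.Linear(16, 8)
parametrize.register_parametrization(layer, "weight", TinyLoRA(16, 8))
x = torch.randn(2, 16)
y = layer(x)  # the forward pass now goes through the parametrized weight

# "Merging" = removing the parametrization while keeping the parametrized values,
# which is what LoRAParametrization.merge_lora_into_layer does above.
parametrize.remove_parametrizations(layer, "weight", leave_parametrized=True)
assert torch.allclose(y, layer(x), atol=1e-6)
```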