kernel
drbh commited on
Commit
90b3e94
·
1 Parent(s): 56449c1

fix: bump build for torch 2.8

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. build/torch26-cxx11-cu124-x86_64-linux/flash_attn/_ops.py +0 -9
  2. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/__init__.py +0 -393
  3. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/bert_padding.py +0 -218
  4. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/flash_attn_interface.py +0 -1609
  5. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/layers/__init__.py +0 -0
  6. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/layers/patch_embed.py +0 -67
  7. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/layers/rotary.py +0 -483
  8. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/__init__.py +0 -0
  9. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/activations.py +0 -135
  10. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/fused_dense.py +0 -688
  11. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/layer_norm.py +0 -800
  12. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/rms_norm.py +0 -174
  13. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/__init__.py +0 -1
  14. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/cross_entropy.py +0 -330
  15. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/k_activations.py +0 -162
  16. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/layer_norm.py +0 -1252
  17. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/linear.py +0 -594
  18. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/mlp.py +0 -149
  19. build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/rotary.py +0 -185
  20. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/__init__.py +0 -393
  21. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/_flash_attn_876ac68_dirty.abi3.so +0 -3
  22. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/_ops.py +0 -9
  23. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/bert_padding.py +0 -218
  24. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/flash_attn_interface.py +0 -1609
  25. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/layers/__init__.py +0 -0
  26. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/layers/patch_embed.py +0 -67
  27. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/layers/rotary.py +0 -483
  28. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/__init__.py +0 -0
  29. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/activations.py +0 -135
  30. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/fused_dense.py +0 -688
  31. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/layer_norm.py +0 -800
  32. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/rms_norm.py +0 -174
  33. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/__init__.py +0 -1
  34. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/cross_entropy.py +0 -330
  35. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/k_activations.py +0 -162
  36. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/layer_norm.py +0 -1252
  37. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/linear.py +0 -594
  38. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/mlp.py +0 -149
  39. build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/rotary.py +0 -185
  40. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/__init__.py +0 -393
  41. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/_flash_attn_876ac68_dirty.abi3.so +0 -3
  42. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/_ops.py +0 -9
  43. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/bert_padding.py +0 -218
  44. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/flash_attn_interface.py +0 -1609
  45. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/layers/__init__.py +0 -0
  46. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/layers/patch_embed.py +0 -67
  47. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/layers/rotary.py +0 -483
  48. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/ops/__init__.py +0 -0
  49. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/ops/activations.py +0 -135
  50. build/torch26-cxx98-cu126-x86_64-linux/flash_attn/ops/fused_dense.py +0 -688
build/torch26-cxx11-cu124-x86_64-linux/flash_attn/_ops.py DELETED
@@ -1,9 +0,0 @@
1
- import torch
2
- from . import _flash_attn_876ac68_dirty
3
- ops = torch.ops._flash_attn_876ac68_dirty
4
-
5
- def add_op_namespace_prefix(op_name: str):
6
- """
7
- Prefix op by namespace.
8
- """
9
- return f"_flash_attn_876ac68_dirty::{op_name}"
 
 
 
 
 
 
 
 
 
 
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/__init__.py DELETED
@@ -1,393 +0,0 @@
1
- from typing import Optional, List
2
- import torch
3
- from ._ops import ops as flash_attn_ops
4
- from .flash_attn_interface import (
5
- flash_attn_func,
6
- flash_attn_kvpacked_func,
7
- flash_attn_qkvpacked_func,
8
- flash_attn_varlen_func,
9
- flash_attn_varlen_kvpacked_func,
10
- flash_attn_varlen_qkvpacked_func,
11
- flash_attn_with_kvcache,
12
- )
13
-
14
-
15
- def fwd(
16
- q: torch.Tensor,
17
- k: torch.Tensor,
18
- v: torch.Tensor,
19
- out: Optional[torch.Tensor] = None,
20
- alibi_slopes: Optional[torch.Tensor] = None,
21
- p_dropout: float = 0.0,
22
- softmax_scale: Optional[float] = None,
23
- is_causal: bool = False,
24
- window_size_left: int = -1,
25
- window_size_right: int = -1,
26
- softcap: float = 0.0,
27
- return_softmax: bool = False,
28
- gen: Optional[torch.Generator] = None,
29
- ) -> List[torch.Tensor]:
30
- """
31
- Forward pass for multi-head attention.
32
-
33
- Args:
34
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
35
- k: Key tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
36
- v: Value tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
37
- out: Optional output tensor, same shape as q
38
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
39
- p_dropout: Dropout probability
40
- softmax_scale: Scale factor for softmax
41
- is_causal: Whether to use causal attention
42
- window_size_left: Window size for left context (-1 for unlimited)
43
- window_size_right: Window size for right context (-1 for unlimited)
44
- softcap: Soft cap for attention weights
45
- return_softmax: Whether to return softmax weights
46
- gen: Optional random number generator
47
-
48
- Returns:
49
- List of tensors: [output, softmax_lse, (softmax if return_softmax)]
50
- """
51
- if softmax_scale is None:
52
- attention_head_dim = q.shape[-1]
53
- softmax_scale = 1.0 / (attention_head_dim**0.5)
54
-
55
- return flash_attn_ops.fwd(
56
- q,
57
- k,
58
- v,
59
- out,
60
- alibi_slopes,
61
- p_dropout,
62
- softmax_scale,
63
- is_causal,
64
- window_size_left,
65
- window_size_right,
66
- softcap,
67
- return_softmax,
68
- gen,
69
- )
70
-
71
-
72
- def varlen_fwd(
73
- q: torch.Tensor,
74
- k: torch.Tensor,
75
- v: torch.Tensor,
76
- cu_seqlens_q: torch.Tensor,
77
- cu_seqlens_k: torch.Tensor,
78
- out: Optional[torch.Tensor] = None,
79
- seqused_k: Optional[torch.Tensor] = None,
80
- leftpad_k: Optional[torch.Tensor] = None,
81
- block_table: Optional[torch.Tensor] = None,
82
- alibi_slopes: Optional[torch.Tensor] = None,
83
- max_seqlen_q: int = 0,
84
- max_seqlen_k: int = 0,
85
- p_dropout: float = 0.0,
86
- softmax_scale: Optional[float] = None,
87
- zero_tensors: bool = False,
88
- is_causal: bool = False,
89
- window_size_left: int = -1,
90
- window_size_right: int = -1,
91
- softcap: float = 0.0,
92
- return_softmax: bool = False,
93
- gen: Optional[torch.Generator] = None,
94
- ) -> List[torch.Tensor]:
95
- """
96
- Forward pass for multi-head attention with variable sequence lengths.
97
-
98
- Args:
99
- q: Query tensor of shape [total_q, num_heads, head_size]
100
- k: Key tensor of shape [total_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
101
- v: Value tensor of shape [total_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
102
- cu_seqlens_q: Cumulative sequence lengths for queries of shape [batch_size+1]
103
- cu_seqlens_k: Cumulative sequence lengths for keys of shape [batch_size+1]
104
- out: Optional output tensor of shape [total_q, num_heads, head_size]
105
- seqused_k: Optional tensor specifying how many keys to use per batch element [batch_size]
106
- leftpad_k: Optional left padding for keys of shape [batch_size]
107
- block_table: Optional block table of shape [batch_size, max_num_blocks_per_seq]
108
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
109
- max_seqlen_q: Maximum sequence length for queries
110
- max_seqlen_k: Maximum sequence length for keys
111
- p_dropout: Dropout probability
112
- softmax_scale: Scale factor for softmax
113
- zero_tensors: Whether to zero tensors before computation
114
- is_causal: Whether to use causal attention
115
- window_size_left: Window size for left context (-1 for unlimited)
116
- window_size_right: Window size for right context (-1 for unlimited)
117
- softcap: Soft cap for attention weights
118
- return_softmax: Whether to return softmax weights
119
- gen: Optional random number generator
120
-
121
- Returns:
122
- List of tensors: [output, softmax_lse, (softmax if return_softmax)]
123
- """
124
- if softmax_scale is None:
125
- attention_head_dim = q.shape[-1]
126
- softmax_scale = 1.0 / (attention_head_dim**0.5)
127
-
128
- return flash_attn_ops.varlen_fwd(
129
- q,
130
- k,
131
- v,
132
- out,
133
- cu_seqlens_q,
134
- cu_seqlens_k,
135
- seqused_k,
136
- leftpad_k,
137
- block_table,
138
- alibi_slopes,
139
- max_seqlen_q,
140
- max_seqlen_k,
141
- p_dropout,
142
- softmax_scale,
143
- zero_tensors,
144
- is_causal,
145
- window_size_left,
146
- window_size_right,
147
- softcap,
148
- return_softmax,
149
- gen,
150
- )
151
-
152
-
153
- def bwd(
154
- dout: torch.Tensor,
155
- q: torch.Tensor,
156
- k: torch.Tensor,
157
- v: torch.Tensor,
158
- out: torch.Tensor,
159
- softmax_lse: torch.Tensor,
160
- dq: Optional[torch.Tensor] = None,
161
- dk: Optional[torch.Tensor] = None,
162
- dv: Optional[torch.Tensor] = None,
163
- alibi_slopes: Optional[torch.Tensor] = None,
164
- p_dropout: float = 0.0,
165
- softmax_scale: Optional[float] = None,
166
- is_causal: bool = False,
167
- window_size_left: int = -1,
168
- window_size_right: int = -1,
169
- softcap: float = 0.0,
170
- deterministic: bool = False,
171
- gen: Optional[torch.Generator] = None,
172
- rng_state: Optional[torch.Tensor] = None,
173
- ) -> List[torch.Tensor]:
174
- """
175
- Backward pass for multi-head attention.
176
-
177
- Args:
178
- dout: Gradient tensor of shape [batch_size, seqlen_q, num_heads, head_size]
179
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
180
- k: Key tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
181
- v: Value tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
182
- out: Output tensor from forward pass of shape [batch_size, seqlen_q, num_heads, head_size]
183
- softmax_lse: Log-sum-exp values from forward pass of shape [batch_size, num_heads, seqlen_q]
184
- dq: Optional gradient tensor for queries, same shape as q
185
- dk: Optional gradient tensor for keys, same shape as k
186
- dv: Optional gradient tensor for values, same shape as v
187
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
188
- p_dropout: Dropout probability
189
- softmax_scale: Scale factor for softmax
190
- is_causal: Whether to use causal attention
191
- window_size_left: Window size for left context (-1 for unlimited)
192
- window_size_right: Window size for right context (-1 for unlimited)
193
- softcap: Soft cap for attention weights
194
- deterministic: Whether to use deterministic algorithms
195
- gen: Optional random number generator
196
- rng_state: Optional RNG state from forward pass
197
-
198
- Returns:
199
- List of tensors: [dq, dk, dv]
200
- """
201
- if softmax_scale is None:
202
- attention_head_dim = q.shape[-1]
203
- softmax_scale = 1.0 / (attention_head_dim**0.5)
204
-
205
- return flash_attn_ops.bwd(
206
- dout,
207
- q,
208
- k,
209
- v,
210
- out,
211
- softmax_lse,
212
- dq,
213
- dk,
214
- dv,
215
- alibi_slopes,
216
- p_dropout,
217
- softmax_scale,
218
- is_causal,
219
- window_size_left,
220
- window_size_right,
221
- softcap,
222
- deterministic,
223
- gen,
224
- rng_state,
225
- )
226
-
227
-
228
- def varlen_bwd(
229
- dout: torch.Tensor,
230
- q: torch.Tensor,
231
- k: torch.Tensor,
232
- v: torch.Tensor,
233
- out: torch.Tensor,
234
- softmax_lse: torch.Tensor,
235
- cu_seqlens_q: torch.Tensor,
236
- cu_seqlens_k: torch.Tensor,
237
- dq: Optional[torch.Tensor] = None,
238
- dk: Optional[torch.Tensor] = None,
239
- dv: Optional[torch.Tensor] = None,
240
- alibi_slopes: Optional[torch.Tensor] = None,
241
- max_seqlen_q: int = 0,
242
- max_seqlen_k: int = 0,
243
- p_dropout: float = 0.0,
244
- softmax_scale: Optional[float] = None,
245
- zero_tensors: bool = False,
246
- is_causal: bool = False,
247
- window_size_left: int = -1,
248
- window_size_right: int = -1,
249
- softcap: float = 0.0,
250
- deterministic: bool = False,
251
- gen: Optional[torch.Generator] = None,
252
- rng_state: Optional[torch.Tensor] = None,
253
- ) -> List[torch.Tensor]:
254
- """
255
- Backward pass for multi-head attention with variable sequence lengths.
256
-
257
- Args:
258
- dout: Gradient tensor of shape [batch_size, seqlen_q, num_heads, head_size]
259
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
260
- k: Key tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
261
- v: Value tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
262
- out: Output tensor from forward pass of shape [batch_size, seqlen_q, num_heads, head_size]
263
- softmax_lse: Log-sum-exp values from forward pass of shape [batch_size, num_heads, seqlen_q]
264
- cu_seqlens_q: Cumulative sequence lengths for queries of shape [batch_size+1]
265
- cu_seqlens_k: Cumulative sequence lengths for keys of shape [batch_size+1]
266
- dq: Optional gradient tensor for queries, same shape as q
267
- dk: Optional gradient tensor for keys, same shape as k
268
- dv: Optional gradient tensor for values, same shape as v
269
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
270
- max_seqlen_q: Maximum sequence length for queries
271
- max_seqlen_k: Maximum sequence length for keys
272
- p_dropout: Dropout probability
273
- softmax_scale: Scale factor for softmax
274
- zero_tensors: Whether to zero tensors before computation
275
- is_causal: Whether to use causal attention
276
- window_size_left: Window size for left context (-1 for unlimited)
277
- window_size_right: Window size for right context (-1 for unlimited)
278
- softcap: Soft cap for attention weights
279
- deterministic: Whether to use deterministic algorithms
280
- gen: Optional random number generator
281
- rng_state: Optional RNG state from forward pass
282
-
283
- Returns:
284
- List of tensors: [dq, dk, dv]
285
- """
286
- if softmax_scale is None:
287
- attention_head_dim = q.shape[-1]
288
- softmax_scale = 1.0 / (attention_head_dim**0.5)
289
-
290
- return flash_attn_ops.varlen_bwd(
291
- dout,
292
- q,
293
- k,
294
- v,
295
- out,
296
- softmax_lse,
297
- dq,
298
- dk,
299
- dv,
300
- cu_seqlens_q,
301
- cu_seqlens_k,
302
- alibi_slopes,
303
- max_seqlen_q,
304
- max_seqlen_k,
305
- p_dropout,
306
- softmax_scale,
307
- zero_tensors,
308
- is_causal,
309
- window_size_left,
310
- window_size_right,
311
- softcap,
312
- deterministic,
313
- gen,
314
- rng_state,
315
- )
316
-
317
-
318
- def fwd_kvcache(
319
- q: torch.Tensor,
320
- kcache: torch.Tensor,
321
- vcache: torch.Tensor,
322
- k: Optional[torch.Tensor] = None,
323
- v: Optional[torch.Tensor] = None,
324
- seqlens_k: Optional[torch.Tensor] = None,
325
- rotary_cos: Optional[torch.Tensor] = None,
326
- rotary_sin: Optional[torch.Tensor] = None,
327
- cache_batch_idx: Optional[torch.Tensor] = None,
328
- leftpad_k: Optional[torch.Tensor] = None,
329
- block_table: Optional[torch.Tensor] = None,
330
- alibi_slopes: Optional[torch.Tensor] = None,
331
- out: Optional[torch.Tensor] = None,
332
- softmax_scale: Optional[float] = None,
333
- is_causal: bool = False,
334
- window_size_left: int = -1,
335
- window_size_right: int = -1,
336
- softcap: float = 0.0,
337
- is_rotary_interleaved: bool = False,
338
- num_splits: int = 1,
339
- ) -> List[torch.Tensor]:
340
- """
341
- Forward pass for multi-head attention with KV cache.
342
-
343
- Args:
344
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
345
- kcache: Key cache tensor of shape [batch_size_c, seqlen_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
346
- vcache: Value cache tensor of shape [batch_size_c, seqlen_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
347
- k: Optional new keys tensor of shape [batch_size, seqlen_knew, num_heads_k, head_size]
348
- v: Optional new values tensor of shape [batch_size, seqlen_knew, num_heads_k, head_size]
349
- seqlens_k: Optional sequence lengths for keys of shape [batch_size]
350
- rotary_cos: Optional rotary cosine tensor of shape [seqlen_ro, rotary_dim/2]
351
- rotary_sin: Optional rotary sine tensor of shape [seqlen_ro, rotary_dim/2]
352
- cache_batch_idx: Optional indices to index into the KV cache
353
- leftpad_k: Optional left padding for keys of shape [batch_size]
354
- block_table: Optional block table of shape [batch_size, max_num_blocks_per_seq]
355
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
356
- out: Optional output tensor, same shape as q
357
- softmax_scale: Scale factor for softmax
358
- is_causal: Whether to use causal attention
359
- window_size_left: Window size for left context (-1 for unlimited)
360
- window_size_right: Window size for right context (-1 for unlimited)
361
- softcap: Soft cap for attention weights
362
- is_rotary_interleaved: Whether rotary embeddings are interleaved
363
- num_splits: Number of splits for computation
364
-
365
- Returns:
366
- List of tensors: [output, softmax_lse]
367
- """
368
- if softmax_scale is None:
369
- attention_head_dim = q.shape[-1]
370
- softmax_scale = 1.0 / (attention_head_dim**0.5)
371
-
372
- return flash_attn_ops.fwd_kvcache(
373
- q,
374
- kcache,
375
- vcache,
376
- k,
377
- v,
378
- seqlens_k,
379
- rotary_cos,
380
- rotary_sin,
381
- cache_batch_idx,
382
- leftpad_k,
383
- block_table,
384
- alibi_slopes,
385
- out,
386
- softmax_scale,
387
- is_causal,
388
- window_size_left,
389
- window_size_right,
390
- softcap,
391
- is_rotary_interleaved,
392
- num_splits,
393
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/bert_padding.py DELETED
@@ -1,218 +0,0 @@
1
- # Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
2
-
3
- import torch
4
- import torch.nn.functional as F
5
- from einops import rearrange, repeat
6
-
7
-
8
- class IndexFirstAxis(torch.autograd.Function):
9
- @staticmethod
10
- def forward(ctx, input, indices):
11
- ctx.save_for_backward(indices)
12
- assert input.ndim >= 2
13
- ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
14
- second_dim = other_shape.numel()
15
- # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
16
- # return input[indices]
17
- return torch.gather(
18
- rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)
19
- ).reshape(-1, *other_shape)
20
-
21
- @staticmethod
22
- def backward(ctx, grad_output):
23
- (indices,) = ctx.saved_tensors
24
- assert grad_output.ndim >= 2
25
- other_shape = grad_output.shape[1:]
26
- grad_output = rearrange(grad_output, "b ... -> b (...)")
27
- grad_input = torch.zeros(
28
- [ctx.first_axis_dim, grad_output.shape[1]],
29
- device=grad_output.device,
30
- dtype=grad_output.dtype,
31
- )
32
- # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
33
- # grad_input[indices] = grad_output
34
- grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
35
- return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
36
-
37
-
38
- index_first_axis = IndexFirstAxis.apply
39
-
40
-
41
- class IndexPutFirstAxis(torch.autograd.Function):
42
- @staticmethod
43
- def forward(ctx, values, indices, first_axis_dim):
44
- ctx.save_for_backward(indices)
45
- assert indices.ndim == 1
46
- assert values.ndim >= 2
47
- output = torch.zeros(
48
- first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
49
- )
50
- # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
51
- output[indices] = values
52
- # output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
53
- return output
54
-
55
- @staticmethod
56
- def backward(ctx, grad_output):
57
- (indices,) = ctx.saved_tensors
58
- # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
59
- grad_values = grad_output[indices]
60
- # grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
61
- return grad_values, None, None
62
-
63
-
64
- index_put_first_axis = IndexPutFirstAxis.apply
65
-
66
-
67
- class IndexFirstAxisResidual(torch.autograd.Function):
68
- @staticmethod
69
- def forward(ctx, input, indices):
70
- ctx.save_for_backward(indices)
71
- assert input.ndim >= 2
72
- ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
73
- second_dim = other_shape.numel()
74
- # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
75
- output = input[indices]
76
- # We don't want to reshape input (b ... -> b (...)) since it could change the channel_last
77
- # memory format to channel_first. In other words, input might not be contiguous.
78
- # If we don't detach, Pytorch complains about output being a view and is being modified inplace
79
- return output, input.detach()
80
-
81
- @staticmethod
82
- def backward(ctx, grad_output, grad_residual):
83
- (indices,) = ctx.saved_tensors
84
- assert grad_output.ndim >= 2
85
- other_shape = grad_output.shape[1:]
86
- assert grad_residual.shape[1:] == other_shape
87
- grad_input = grad_residual
88
- # grad_input[indices] += grad_output
89
- indices = indices.reshape(indices.shape[0], *((1,) * (grad_output.ndim - 1)))
90
- indices = indices.expand_as(grad_output)
91
- grad_input.scatter_add_(0, indices, grad_output)
92
- return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
93
-
94
-
95
- index_first_axis_residual = IndexFirstAxisResidual.apply
96
-
97
-
98
- def unpad_input(hidden_states, attention_mask, unused_mask=None):
99
- """
100
- Arguments:
101
- hidden_states: (batch, seqlen, ...)
102
- attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
103
- unused_mask: (batch, seqlen), bool / int, 1 means the element is allocated but unused.
104
- Return:
105
- hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask + unused_mask.
106
- indices: (total_nnz), the indices of masked tokens from the flattened input sequence.
107
- cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
108
- max_seqlen_in_batch: int
109
- seqused: (batch), returns the number of tokens selected in attention_mask + unused_mask.
110
- """
111
- all_masks = (attention_mask + unused_mask) if unused_mask is not None else attention_mask
112
- seqlens_in_batch = all_masks.sum(dim=-1, dtype=torch.int32)
113
- used_seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
114
- indices = torch.nonzero(all_masks.flatten(), as_tuple=False).flatten()
115
- max_seqlen_in_batch = seqlens_in_batch.max().item()
116
- cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
117
- # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
118
- # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
119
- # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
120
- # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
121
- # so we write custom forward and backward to make it a bit faster.
122
- return (
123
- index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
124
- indices,
125
- cu_seqlens,
126
- max_seqlen_in_batch,
127
- used_seqlens_in_batch,
128
- )
129
-
130
-
131
- def unpad_input_for_concatenated_sequences(hidden_states, attention_mask_in_length):
132
- """
133
- Supports concatenating short samples in one sequence. The attention_mask_in_length is utilized to mask other short samples. It helps efficient training of variant lengths-based samples (e.g., the supervised fine-tuning task in large language model).
134
- The motivation for this function is explained [here](https://github.com/Dao-AILab/flash-attention/issues/432#issuecomment-1668822286).
135
-
136
- For example, if batch = 3 and seqlen = 6, the attention_mask_in_length is:
137
- ```
138
- [
139
- [2, 3, 0, 0, 0, 0],
140
- [3, 2, 0, 0, 0, 0],
141
- [6, 0, 0, 0, 0, 0]
142
- ]
143
- ```
144
- , which refers to the 3D-attention mask:
145
- ```
146
- [
147
- [
148
- [1, 0, 0, 0, 0, 0],
149
- [1, 1, 0, 0, 0, 0],
150
- [0, 0, 1, 0, 0, 0],
151
- [0, 0, 1, 1, 0, 0],
152
- [0, 0, 1, 1, 1, 0],
153
- [0, 0, 0, 0, 0, 1]
154
- ],
155
- [
156
- [1, 0, 0, 0, 0, 0],
157
- [1, 1, 0, 0, 0, 0],
158
- [1, 1, 1, 0, 0, 0],
159
- [0, 0, 0, 1, 0, 0],
160
- [0, 0, 0, 1, 1, 0],
161
- [0, 0, 0, 0, 0, 1]
162
- ],
163
- [
164
- [1, 0, 0, 0, 0, 0],
165
- [1, 1, 0, 0, 0, 0],
166
- [1, 1, 1, 0, 0, 0],
167
- [1, 1, 1, 1, 0, 0],
168
- [1, 1, 1, 1, 1, 0],
169
- [1, 1, 1, 1, 1, 1]
170
- ]
171
- ]
172
- ```.
173
-
174
- Arguments:
175
- hidden_states: (batch, seqlen, ...)
176
- attention_mask_in_length: (batch, seqlen), int, a nonzero number (e.g., 1, 2, 3, etc.) means length of concatenated sequence in b-th batch, and 0 means none.
177
- Return:
178
- hidden_states: (total_nnz, ...), where total_nnz = number of tokens in selected in attention_mask.
179
- indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
180
- cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
181
- max_seqlen_in_batch: int
182
- """
183
- length = attention_mask_in_length.sum(dim=-1)
184
- seqlen = attention_mask_in_length.size(-1)
185
- attention_mask_2d = torch.arange(seqlen, device=length.device, dtype=length.dtype).expand(len(length), seqlen) < length.unsqueeze(1)
186
- real_indices_idx = torch.nonzero(attention_mask_in_length.flatten(), as_tuple=False).flatten()
187
- seqlens_in_batch = attention_mask_in_length.flatten()[real_indices_idx]
188
- indices = torch.nonzero(attention_mask_2d.flatten(), as_tuple=False).flatten()
189
- max_seqlen_in_batch = seqlens_in_batch.max().item()
190
- cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
191
- # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
192
- # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
193
- # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
194
- # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
195
- # so we write custom forward and backward to make it a bit faster.
196
- return (
197
- index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
198
- indices,
199
- cu_seqlens,
200
- max_seqlen_in_batch,
201
- )
202
-
203
-
204
- def pad_input(hidden_states, indices, batch, seqlen):
205
- """
206
- Arguments:
207
- hidden_states: (total_nnz, ...), where total_nnz = number of tokens in selected in attention_mask.
208
- indices: (total_nnz), the indices that represent the non-masked tokens of the original padded input sequence.
209
- batch: int, batch size for the padded sequence.
210
- seqlen: int, maximum sequence length for the padded sequence.
211
- Return:
212
- hidden_states: (batch, seqlen, ...)
213
- """
214
- dim = hidden_states.shape[-1]
215
- # output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
216
- # output[indices] = hidden_states
217
- output = index_put_first_axis(hidden_states, indices, batch * seqlen)
218
- return rearrange(output, "(b s) ... -> b s ...", b=batch)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/flash_attn_interface.py DELETED
@@ -1,1609 +0,0 @@
1
- # Copyright (c) 2023, Tri Dao.
2
-
3
- from typing import Optional, Sequence, Tuple, Union
4
-
5
- import torch
6
- import torch.nn as nn
7
- import os
8
-
9
- # # isort: off
10
- # # We need to import the CUDA kernels after importing torch
11
- # USE_TRITON_ROCM = os.getenv("FLASH_ATTENTION_TRITON_AMD_ENABLE", "FALSE") == "TRUE"
12
- # if USE_TRITON_ROCM:
13
- # from .flash_attn_triton_amd import interface_fa as flash_attn_gpu
14
- # else:
15
- # import flash_attn_2_cuda as flash_attn_gpu
16
-
17
-
18
- from ._ops import ops as flash_attn_gpu
19
-
20
- # # isort: on
21
-
22
- def maybe_contiguous(x):
23
- return x.contiguous() if x is not None and x.stride(-1) != 1 else x
24
-
25
-
26
- def _get_block_size_n(device, head_dim, is_dropout, is_causal):
27
- # This should match the block sizes in the CUDA kernel
28
- assert head_dim <= 256
29
- major, minor = torch.cuda.get_device_capability(device)
30
- is_sm8x = major == 8 and minor > 0 # Only include sm86 and sm89, exclude sm80 (A100)
31
- is_sm80 = major == 8 and minor == 0
32
- is_sm90 = major == 9 and minor == 0
33
- if head_dim <= 32:
34
- return 128
35
- if head_dim <= 64:
36
- return 128 if not is_dropout else 64
37
- elif head_dim <= 96:
38
- return 64
39
- elif head_dim <= 128:
40
- if is_sm8x:
41
- return 64 if (not is_dropout and is_causal) else 32
42
- else:
43
- return 64 if not is_dropout else 32
44
- elif head_dim <= 192:
45
- return 64
46
- elif head_dim <= 224:
47
- return 64
48
- elif head_dim <= 256:
49
- return 64
50
-
51
-
52
- def round_multiple(x, m):
53
- return (x + m - 1) // m * m
54
-
55
-
56
- # torch.compile() support is only enabled for pytorch >= 2.4
57
- # The reason for this is that we are using the new custom_op and register_fake
58
- # APIs, which support inplace modification of inputs in the function itself
59
- if torch.__version__ >= "2.4.0":
60
- _torch_custom_op_wrapper = torch.library.custom_op
61
- _torch_register_fake_wrapper = torch.library.register_fake
62
- else:
63
- def noop_custom_op_wrapper(name, fn=None, /, *, mutates_args, device_types=None, schema=None):
64
- def wrap(func):
65
- return func
66
- if fn is None:
67
- return wrap
68
- return fn
69
- def noop_register_fake_wrapper(op, fn=None, /, *, lib=None, _stacklevel=1):
70
- def wrap(func):
71
- return func
72
- if fn is None:
73
- return wrap
74
- return fn
75
- _torch_custom_op_wrapper = noop_custom_op_wrapper
76
- _torch_register_fake_wrapper = noop_register_fake_wrapper
77
-
78
-
79
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_forward", mutates_args=(), device_types="cuda")
80
- def _flash_attn_forward(
81
- q: torch.Tensor,
82
- k: torch.Tensor,
83
- v: torch.Tensor,
84
- dropout_p: float,
85
- softmax_scale: float,
86
- causal: bool,
87
- window_size_left: int,
88
- window_size_right: int,
89
- softcap: float,
90
- alibi_slopes: Optional[torch.Tensor],
91
- return_softmax: bool
92
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
93
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
94
- out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.fwd(
95
- q,
96
- k,
97
- v,
98
- None,
99
- alibi_slopes,
100
- dropout_p,
101
- softmax_scale,
102
- causal,
103
- window_size_left,
104
- window_size_right,
105
- softcap,
106
- return_softmax,
107
- None,
108
- )
109
- return out, softmax_lse, S_dmask, rng_state
110
-
111
-
112
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_forward")
113
- def _flash_attn_forward_fake(
114
- q: torch.Tensor,
115
- k: torch.Tensor,
116
- v: torch.Tensor,
117
- dropout_p: float,
118
- softmax_scale: float,
119
- causal: bool,
120
- window_size_left: int,
121
- window_size_right: int,
122
- softcap: float,
123
- alibi_slopes: Optional[torch.Tensor],
124
- return_softmax: bool
125
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
126
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
127
- batch_size, seqlen_q, num_heads, head_size = q.shape
128
- seqlen_k = k.shape[1]
129
- out = torch.empty_like(q)
130
- softmax_lse = torch.empty((batch_size, num_heads, seqlen_q), dtype=torch.float32, device=q.device, layout=q.layout)
131
- p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
132
- if return_softmax:
133
- p = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128), round_multiple(seqlen_k, 128)), dtype=q.dtype, device=q.device, layout=q.layout)
134
- rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
135
-
136
- return out, softmax_lse, p, rng_state
137
-
138
-
139
- if torch.__version__ >= "2.4.0":
140
- _wrapped_flash_attn_forward = torch.ops.flash_attn._flash_attn_forward
141
- else:
142
- _wrapped_flash_attn_forward = _flash_attn_forward
143
-
144
-
145
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_forward", mutates_args=(), device_types="cuda")
146
- def _flash_attn_varlen_forward(
147
- q: torch.Tensor,
148
- k: torch.Tensor,
149
- v: torch.Tensor,
150
- cu_seqlens_q: torch.Tensor,
151
- cu_seqlens_k: torch.Tensor,
152
- max_seqlen_q: int,
153
- max_seqlen_k: int,
154
- dropout_p: float,
155
- softmax_scale: float,
156
- causal: bool,
157
- window_size_left: int = -1,
158
- window_size_right: int = -1,
159
- softcap: float = 0.0,
160
- alibi_slopes: Optional[torch.Tensor] = None,
161
- return_softmax: bool = False,
162
- block_table: Optional[torch.Tensor] = None,
163
- leftpad_k: Optional[torch.Tensor] = None,
164
- seqused_k: Optional[torch.Tensor] = None,
165
- zero_tensors: bool = False,
166
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
167
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
168
- out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.varlen_fwd(
169
- q,
170
- k,
171
- v,
172
- None,
173
- cu_seqlens_q,
174
- cu_seqlens_k,
175
- seqused_k,
176
- leftpad_k,
177
- block_table,
178
- alibi_slopes,
179
- max_seqlen_q,
180
- max_seqlen_k,
181
- dropout_p,
182
- softmax_scale,
183
- zero_tensors,
184
- causal,
185
- window_size_left,
186
- window_size_right,
187
- softcap,
188
- return_softmax,
189
- None,
190
- )
191
- # if out.isnan().any() or softmax_lse.isnan().any():
192
- # breakpoint()
193
- return out, softmax_lse, S_dmask, rng_state
194
-
195
-
196
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_forward")
197
- def _flash_attn_varlen_forward_fake(
198
- q: torch.Tensor,
199
- k: torch.Tensor,
200
- v: torch.Tensor,
201
- cu_seqlens_q: torch.Tensor,
202
- cu_seqlens_k: torch.Tensor,
203
- max_seqlen_q: int,
204
- max_seqlen_k: int,
205
- dropout_p: float,
206
- softmax_scale: float,
207
- causal: bool,
208
- window_size_left: int = -1,
209
- window_size_right: int = -1,
210
- softcap: float = 0.0,
211
- alibi_slopes: Optional[torch.Tensor] = None,
212
- return_softmax: bool = False,
213
- block_table: Optional[torch.Tensor] = None,
214
- leftpad_k: Optional[torch.Tensor] = None,
215
- seqused_k: Optional[torch.Tensor] = None,
216
- zero_tensors: bool = False,
217
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
218
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
219
- paged_kv = block_table is not None
220
- batch_size = cu_seqlens_q.numel() - 1
221
- total_q, num_heads, _ = q.shape
222
-
223
- out = torch.empty_like(q)
224
- softmax_lse = torch.empty((num_heads, total_q), dtype=torch.float32, device=q.device, layout=q.layout)
225
- p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
226
- seqlen_q_rounded = round_multiple(max_seqlen_q, 128)
227
- seqlen_k_rounded = round_multiple(max_seqlen_k, 128)
228
- if return_softmax:
229
- p = torch.empty((batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded), dtype=q.dtype, device=q.device, layout=q.layout)
230
- rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
231
- return out, softmax_lse, p, rng_state
232
-
233
-
234
- if torch.__version__ >= "2.4.0":
235
- _wrapped_flash_attn_varlen_forward = torch.ops.flash_attn._flash_attn_varlen_forward
236
- else:
237
- _wrapped_flash_attn_varlen_forward = _flash_attn_varlen_forward
238
-
239
-
240
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
241
- def _flash_attn_backward(
242
- dout: torch.Tensor,
243
- q: torch.Tensor,
244
- k: torch.Tensor,
245
- v: torch.Tensor,
246
- out: torch.Tensor,
247
- softmax_lse: torch.Tensor,
248
- dq: Optional[torch.Tensor],
249
- dk: Optional[torch.Tensor],
250
- dv: Optional[torch.Tensor],
251
- dropout_p: float,
252
- softmax_scale: float,
253
- causal: bool,
254
- window_size_left: int,
255
- window_size_right: int,
256
- softcap: float,
257
- alibi_slopes: Optional[torch.Tensor],
258
- deterministic: bool,
259
- rng_state: Optional[torch.Tensor] = None,
260
- ) -> torch.Tensor:
261
- # dq, dk, dv are allocated by us so they should already be contiguous
262
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
263
- (
264
- dq,
265
- dk,
266
- dv,
267
- softmax_d,
268
- ) = flash_attn_gpu.bwd(
269
- dout,
270
- q,
271
- k,
272
- v,
273
- out,
274
- softmax_lse,
275
- dq,
276
- dk,
277
- dv,
278
- alibi_slopes,
279
- dropout_p,
280
- softmax_scale,
281
- causal,
282
- window_size_left,
283
- window_size_right,
284
- softcap,
285
- deterministic,
286
- None,
287
- rng_state,
288
- )
289
- return softmax_d
290
-
291
-
292
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_backward")
293
- def _flash_attn_backward_fake(
294
- dout: torch.Tensor,
295
- q: torch.Tensor,
296
- k: torch.Tensor,
297
- v: torch.Tensor,
298
- out: torch.Tensor,
299
- softmax_lse: torch.Tensor,
300
- dq: Optional[torch.Tensor],
301
- dk: Optional[torch.Tensor],
302
- dv: Optional[torch.Tensor],
303
- dropout_p: float,
304
- softmax_scale: float,
305
- causal: bool,
306
- window_size_left: int,
307
- window_size_right: int,
308
- softcap: float,
309
- alibi_slopes: Optional[torch.Tensor],
310
- deterministic: bool,
311
- rng_state: Optional[torch.Tensor] = None,
312
- ) -> torch.Tensor:
313
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
314
- if dq is None:
315
- dq = torch.empty_like(q)
316
- if dk is None:
317
- dk = torch.empty_like(k)
318
- if dv is None:
319
- dv = torch.empty_like(v)
320
- batch_size, seqlen_q, num_heads, _ = q.shape
321
- softmax_d = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128)), device=q.device, dtype=torch.float32)
322
-
323
- return softmax_d
324
-
325
-
326
- if torch.__version__ >= "2.4.0":
327
- _wrapped_flash_attn_backward = torch.ops.flash_attn._flash_attn_backward
328
- else:
329
- _wrapped_flash_attn_backward = _flash_attn_backward
330
-
331
-
332
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
333
- def _flash_attn_varlen_backward(
334
- dout: torch.Tensor,
335
- q: torch.Tensor,
336
- k: torch.Tensor,
337
- v: torch.Tensor,
338
- out: torch.Tensor,
339
- softmax_lse: torch.Tensor,
340
- dq: Optional[torch.Tensor],
341
- dk: Optional[torch.Tensor],
342
- dv: Optional[torch.Tensor],
343
- cu_seqlens_q: torch.Tensor,
344
- cu_seqlens_k: torch.Tensor,
345
- max_seqlen_q: int,
346
- max_seqlen_k: int,
347
- dropout_p: float,
348
- softmax_scale: float,
349
- causal: bool,
350
- window_size_left: int,
351
- window_size_right: int,
352
- softcap: float,
353
- alibi_slopes: Optional[torch.Tensor],
354
- deterministic: bool,
355
- rng_state: Optional[torch.Tensor] = None,
356
- zero_tensors: bool = False,
357
- ) -> torch.Tensor:
358
- # dq, dk, dv are allocated by us so they should already be contiguous
359
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
360
- (
361
- dq,
362
- dk,
363
- dv,
364
- softmax_d,
365
- ) = flash_attn_gpu.varlen_bwd(
366
- dout,
367
- q,
368
- k,
369
- v,
370
- out,
371
- softmax_lse,
372
- dq,
373
- dk,
374
- dv,
375
- cu_seqlens_q,
376
- cu_seqlens_k,
377
- alibi_slopes,
378
- max_seqlen_q,
379
- max_seqlen_k,
380
- dropout_p,
381
- softmax_scale,
382
- zero_tensors,
383
- causal,
384
- window_size_left,
385
- window_size_right,
386
- softcap,
387
- deterministic,
388
- None,
389
- rng_state,
390
- )
391
- # if dk.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
392
- # breakpoint()
393
- return softmax_d
394
-
395
-
396
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_backward")
397
- def _flash_attn_varlen_backward_fake(
398
- dout: torch.Tensor,
399
- q: torch.Tensor,
400
- k: torch.Tensor,
401
- v: torch.Tensor,
402
- out: torch.Tensor,
403
- softmax_lse: torch.Tensor,
404
- dq: Optional[torch.Tensor],
405
- dk: Optional[torch.Tensor],
406
- dv: Optional[torch.Tensor],
407
- cu_seqlens_q: torch.Tensor,
408
- cu_seqlens_k: torch.Tensor,
409
- max_seqlen_q: int,
410
- max_seqlen_k: int,
411
- dropout_p: float,
412
- softmax_scale: float,
413
- causal: bool,
414
- window_size_left: int,
415
- window_size_right: int,
416
- softcap: float,
417
- alibi_slopes: Optional[torch.Tensor],
418
- deterministic: bool,
419
- rng_state: Optional[torch.Tensor] = None,
420
- zero_tensors: bool = False,
421
- ) -> torch.Tensor:
422
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
423
- batch_size = cu_seqlens_q.numel() - 1
424
- total_q, num_heads, _ = q.shape
425
-
426
- if dq is None:
427
- dq = torch.empty_like(q)
428
- if dk is None:
429
- dk = torch.empty_like(k)
430
- if dv is None:
431
- dv = torch.empty_like(v)
432
- softmax_d = torch.empty((num_heads, total_q + 128 * batch_size), device=q.device, dtype=torch.float32)
433
-
434
- return softmax_d
435
-
436
-
437
- if torch.__version__ >= "2.4.0":
438
- _wrapped_flash_attn_varlen_backward = torch.ops.flash_attn._flash_attn_varlen_backward
439
- else:
440
- _wrapped_flash_attn_varlen_backward = _flash_attn_varlen_backward
441
-
442
-
443
- class FlashAttnQKVPackedFunc(torch.autograd.Function):
444
- @staticmethod
445
- def forward(
446
- ctx,
447
- qkv,
448
- dropout_p,
449
- softmax_scale,
450
- causal,
451
- window_size,
452
- softcap,
453
- alibi_slopes,
454
- deterministic,
455
- return_softmax,
456
- is_grad_enabled,
457
- ):
458
- is_grad = is_grad_enabled and qkv.requires_grad
459
- if softmax_scale is None:
460
- softmax_scale = qkv.shape[-1] ** (-0.5)
461
- q, k, v = qkv[:, :, 0].detach(), qkv[:, :, 1].detach(), qkv[:, :, 2].detach()
462
- head_size_og = q.size(3)
463
- if head_size_og % 8 != 0:
464
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
465
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
466
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
467
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
468
- q,
469
- k,
470
- v,
471
- dropout_p,
472
- softmax_scale,
473
- causal=causal,
474
- window_size_left=window_size[0],
475
- window_size_right=window_size[1],
476
- softcap=softcap,
477
- alibi_slopes=alibi_slopes,
478
- return_softmax=return_softmax and dropout_p > 0,
479
- )
480
- if is_grad:
481
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
482
- ctx.dropout_p = dropout_p
483
- ctx.softmax_scale = softmax_scale
484
- ctx.causal = causal
485
- ctx.window_size = window_size
486
- ctx.softcap = softcap
487
- ctx.alibi_slopes = alibi_slopes
488
- ctx.deterministic = deterministic
489
- out = out_padded[..., :head_size_og]
490
- return out if not return_softmax else (out, softmax_lse, S_dmask)
491
-
492
- @staticmethod
493
- def backward(ctx, dout, *args):
494
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
495
- qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
496
- dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
497
- head_size_og = dout.size(3)
498
- dout_padded = dout
499
- if head_size_og % 8 != 0:
500
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
501
- _wrapped_flash_attn_backward(
502
- dout_padded,
503
- q,
504
- k,
505
- v,
506
- out,
507
- softmax_lse,
508
- dqkv[:, :, 0],
509
- dqkv[:, :, 1],
510
- dqkv[:, :, 2],
511
- ctx.dropout_p,
512
- ctx.softmax_scale,
513
- ctx.causal,
514
- ctx.window_size[0],
515
- ctx.window_size[1],
516
- ctx.softcap,
517
- ctx.alibi_slopes,
518
- ctx.deterministic,
519
- rng_state=rng_state,
520
- )
521
- dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
522
- return dqkv, None, None, None, None, None, None, None, None, None
523
-
524
-
525
- class FlashAttnVarlenQKVPackedFunc(torch.autograd.Function):
526
- @staticmethod
527
- def forward(
528
- ctx,
529
- qkv,
530
- cu_seqlens,
531
- max_seqlen,
532
- dropout_p,
533
- softmax_scale,
534
- causal,
535
- window_size,
536
- softcap,
537
- alibi_slopes,
538
- deterministic,
539
- return_softmax,
540
- is_grad_enabled,
541
- ):
542
- is_grad = is_grad_enabled and qkv.requires_grad
543
- if softmax_scale is None:
544
- softmax_scale = qkv.shape[-1] ** (-0.5)
545
- q, k, v = qkv[:, 0].detach(), qkv[:, 1].detach(), qkv[:, 2].detach()
546
- head_size_og = q.size(2)
547
- if head_size_og % 8 != 0:
548
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
549
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
550
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
551
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
552
- q,
553
- k,
554
- v,
555
- cu_seqlens,
556
- cu_seqlens,
557
- max_seqlen,
558
- max_seqlen,
559
- dropout_p,
560
- softmax_scale,
561
- causal=causal,
562
- window_size_left=window_size[0],
563
- window_size_right=window_size[1],
564
- softcap=softcap,
565
- alibi_slopes=alibi_slopes,
566
- return_softmax=return_softmax and dropout_p > 0,
567
- block_table=None,
568
- )
569
- if is_grad:
570
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, cu_seqlens, rng_state)
571
- ctx.dropout_p = dropout_p
572
- ctx.max_seqlen = max_seqlen
573
- ctx.softmax_scale = softmax_scale
574
- ctx.causal = causal
575
- ctx.window_size = window_size
576
- ctx.softcap = softcap
577
- ctx.alibi_slopes = alibi_slopes
578
- ctx.deterministic = deterministic
579
- out = out_padded[..., :head_size_og]
580
- return out if not return_softmax else (out, softmax_lse, S_dmask)
581
-
582
- @staticmethod
583
- def backward(ctx, dout, *args):
584
- q, k, v, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
585
- qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
586
- dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
587
- head_size_og = dout.size(2)
588
- dout_padded = dout
589
- if head_size_og % 8 != 0:
590
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
591
- _wrapped_flash_attn_varlen_backward(
592
- dout_padded,
593
- q,
594
- k,
595
- v,
596
- out,
597
- softmax_lse,
598
- dqkv[:, 0],
599
- dqkv[:, 1],
600
- dqkv[:, 2],
601
- cu_seqlens,
602
- cu_seqlens,
603
- ctx.max_seqlen,
604
- ctx.max_seqlen,
605
- ctx.dropout_p,
606
- ctx.softmax_scale,
607
- ctx.causal,
608
- ctx.window_size[0],
609
- ctx.window_size[1],
610
- ctx.softcap,
611
- ctx.alibi_slopes,
612
- ctx.deterministic,
613
- rng_state=rng_state,
614
- )
615
- dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
616
- return dqkv, None, None, None, None, None, None, None, None, None, None, None
617
-
618
-
619
- class FlashAttnKVPackedFunc(torch.autograd.Function):
620
- @staticmethod
621
- def forward(
622
- ctx,
623
- q,
624
- kv,
625
- dropout_p,
626
- softmax_scale,
627
- causal,
628
- window_size,
629
- softcap,
630
- alibi_slopes,
631
- deterministic,
632
- return_softmax,
633
- is_grad_enabled,
634
- ):
635
- is_grad = is_grad_enabled and any(
636
- x.requires_grad for x in [q, kv]
637
- )
638
- if softmax_scale is None:
639
- softmax_scale = q.shape[-1] ** (-0.5)
640
- k, v = kv[:, :, 0].detach(), kv[:, :, 1].detach()
641
- head_size_og = q.size(3)
642
- if head_size_og % 8 != 0:
643
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
644
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
645
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
646
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
647
- q,
648
- k,
649
- v,
650
- dropout_p,
651
- softmax_scale,
652
- causal=causal,
653
- window_size_left=window_size[0],
654
- window_size_right=window_size[1],
655
- softcap=softcap,
656
- alibi_slopes=alibi_slopes,
657
- return_softmax=return_softmax and dropout_p > 0,
658
- )
659
- if is_grad:
660
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
661
- ctx.dropout_p = dropout_p
662
- ctx.softmax_scale = softmax_scale
663
- ctx.causal = causal
664
- ctx.window_size = window_size
665
- ctx.softcap = softcap
666
- ctx.alibi_slopes = alibi_slopes
667
- ctx.deterministic = deterministic
668
- out = out_padded[..., :head_size_og]
669
- return out if not return_softmax else (out, softmax_lse, S_dmask)
670
-
671
- @staticmethod
672
- def backward(ctx, dout, *args):
673
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
674
- dq = torch.empty_like(q)
675
- kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
676
- dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
677
- head_size_og = dout.size(3)
678
- dout_padded = dout
679
- if head_size_og % 8 != 0:
680
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
681
- _wrapped_flash_attn_backward(
682
- dout_padded,
683
- q,
684
- k,
685
- v,
686
- out,
687
- softmax_lse,
688
- dq,
689
- dkv[:, :, 0],
690
- dkv[:, :, 1],
691
- ctx.dropout_p,
692
- ctx.softmax_scale,
693
- ctx.causal,
694
- ctx.window_size[0],
695
- ctx.window_size[1],
696
- ctx.softcap,
697
- ctx.alibi_slopes,
698
- ctx.deterministic,
699
- rng_state=rng_state,
700
- )
701
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
702
- dkv = dkv[..., : dout.shape[-1]]
703
- return dq, dkv, None, None, None, None, None, None, None, None, None
704
-
705
-
706
- class FlashAttnVarlenKVPackedFunc(torch.autograd.Function):
707
- @staticmethod
708
- def forward(
709
- ctx,
710
- q,
711
- kv,
712
- cu_seqlens_q,
713
- cu_seqlens_k,
714
- max_seqlen_q,
715
- max_seqlen_k,
716
- dropout_p,
717
- softmax_scale,
718
- causal,
719
- window_size,
720
- softcap,
721
- alibi_slopes,
722
- deterministic,
723
- return_softmax,
724
- is_grad_enabled,
725
- ):
726
- is_grad = is_grad_enabled and any(
727
- x.requires_grad for x in [q, kv]
728
- )
729
- if softmax_scale is None:
730
- softmax_scale = q.shape[-1] ** (-0.5)
731
- k, v = kv[:, 0].detach(), kv[:, 1].detach()
732
- head_size_og = q.size(2)
733
- if head_size_og % 8 != 0:
734
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
735
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
736
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
737
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
738
- q,
739
- k,
740
- v,
741
- cu_seqlens_q,
742
- cu_seqlens_k,
743
- max_seqlen_q,
744
- max_seqlen_k,
745
- dropout_p,
746
- softmax_scale,
747
- causal=causal,
748
- window_size_left=window_size[0],
749
- window_size_right=window_size[1],
750
- softcap=softcap,
751
- alibi_slopes=alibi_slopes,
752
- return_softmax=return_softmax and dropout_p > 0,
753
- block_table=None,
754
- )
755
- if is_grad:
756
- ctx.save_for_backward(
757
- q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
758
- )
759
- ctx.dropout_p = dropout_p
760
- ctx.max_seqlen_q = max_seqlen_q
761
- ctx.max_seqlen_k = max_seqlen_k
762
- ctx.softmax_scale = softmax_scale
763
- ctx.causal = causal
764
- ctx.window_size = window_size
765
- ctx.softcap = softcap
766
- ctx.alibi_slopes = alibi_slopes
767
- ctx.deterministic = deterministic
768
- out = out_padded[..., :head_size_og]
769
- return out if not return_softmax else (out, softmax_lse, S_dmask)
770
-
771
- @staticmethod
772
- def backward(ctx, dout, *args):
773
- q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
774
- dq = torch.empty_like(q)
775
- kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
776
- dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
777
- head_size_og = dout.size(2)
778
- dout_padded = dout
779
- if head_size_og % 8 != 0:
780
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
781
- _wrapped_flash_attn_varlen_backward(
782
- dout_padded,
783
- q,
784
- k,
785
- v,
786
- out,
787
- softmax_lse,
788
- dq,
789
- dkv[:, 0],
790
- dkv[:, 1],
791
- cu_seqlens_q,
792
- cu_seqlens_k,
793
- ctx.max_seqlen_q,
794
- ctx.max_seqlen_k,
795
- ctx.dropout_p,
796
- ctx.softmax_scale,
797
- ctx.causal,
798
- ctx.window_size[0],
799
- ctx.window_size[1],
800
- ctx.softcap,
801
- ctx.alibi_slopes,
802
- ctx.deterministic,
803
- rng_state=rng_state,
804
- )
805
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
806
- dkv = dkv[..., : dout.shape[-1]]
807
- return dq, dkv, None, None, None, None, None, None, None, None, None, None, None, None, None
808
-
809
-
810
- class FlashAttnFunc(torch.autograd.Function):
811
- @staticmethod
812
- def forward(
813
- ctx,
814
- q,
815
- k,
816
- v,
817
- dropout_p,
818
- softmax_scale,
819
- causal,
820
- window_size,
821
- softcap,
822
- alibi_slopes,
823
- deterministic,
824
- return_softmax,
825
- is_grad_enabled,
826
- ):
827
- is_grad = is_grad_enabled and any(
828
- x.requires_grad for x in [q, k, v]
829
- )
830
- if softmax_scale is None:
831
- softmax_scale = q.shape[-1] ** (-0.5)
832
- head_size_og = q.size(3)
833
- if head_size_og % 8 != 0:
834
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
835
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
836
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
837
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
838
- q,
839
- k,
840
- v,
841
- dropout_p,
842
- softmax_scale,
843
- causal=causal,
844
- window_size_left=window_size[0],
845
- window_size_right=window_size[1],
846
- softcap=softcap,
847
- alibi_slopes=alibi_slopes,
848
- return_softmax=return_softmax and dropout_p > 0,
849
- )
850
- if is_grad:
851
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
852
- ctx.dropout_p = dropout_p
853
- ctx.softmax_scale = softmax_scale
854
- ctx.causal = causal
855
- ctx.window_size = window_size
856
- ctx.softcap = softcap
857
- ctx.alibi_slopes = alibi_slopes
858
- ctx.deterministic = deterministic
859
- out = out_padded[..., :head_size_og]
860
- return out if not return_softmax else (out, softmax_lse, S_dmask)
861
-
862
- @staticmethod
863
- def backward(ctx, dout, *args):
864
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
865
- dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
866
- head_size_og = dout.size(3)
867
- dout_padded = dout
868
- if head_size_og % 8 != 0:
869
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
870
- _wrapped_flash_attn_backward(
871
- dout_padded,
872
- q,
873
- k,
874
- v,
875
- out,
876
- softmax_lse,
877
- dq,
878
- dk,
879
- dv,
880
- ctx.dropout_p,
881
- ctx.softmax_scale,
882
- ctx.causal,
883
- ctx.window_size[0],
884
- ctx.window_size[1],
885
- ctx.softcap,
886
- ctx.alibi_slopes,
887
- ctx.deterministic,
888
- rng_state=rng_state,
889
- )
890
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
891
- dk = dk[..., : dout.shape[-1]]
892
- dv = dv[..., : dout.shape[-1]]
893
- return dq, dk, dv, None, None, None, None, None, None, None, None, None
894
-
895
-
896
- class FlashAttnVarlenFunc(torch.autograd.Function):
897
- @staticmethod
898
- def forward(
899
- ctx,
900
- q,
901
- k,
902
- v,
903
- cu_seqlens_q,
904
- cu_seqlens_k,
905
- max_seqlen_q,
906
- max_seqlen_k,
907
- dropout_p,
908
- softmax_scale,
909
- causal,
910
- window_size,
911
- softcap,
912
- alibi_slopes,
913
- deterministic,
914
- return_softmax,
915
- block_table,
916
- is_grad_enabled,
917
- ):
918
- is_grad = is_grad_enabled and any(
919
- x.requires_grad for x in [q, k, v]
920
- )
921
- if softmax_scale is None:
922
- softmax_scale = q.shape[-1] ** (-0.5)
923
- head_size_og = q.size(2)
924
- if head_size_og % 8 != 0:
925
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
926
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
927
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
928
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
929
- q,
930
- k,
931
- v,
932
- cu_seqlens_q,
933
- cu_seqlens_k,
934
- max_seqlen_q,
935
- max_seqlen_k,
936
- dropout_p,
937
- softmax_scale,
938
- causal=causal,
939
- window_size_left=window_size[0],
940
- window_size_right=window_size[1],
941
- softcap=softcap,
942
- alibi_slopes=alibi_slopes,
943
- return_softmax=return_softmax and dropout_p > 0,
944
- block_table=block_table,
945
- )
946
- if is_grad:
947
- ctx.save_for_backward(
948
- q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
949
- )
950
- ctx.dropout_p = dropout_p
951
- ctx.max_seqlen_q = max_seqlen_q
952
- ctx.max_seqlen_k = max_seqlen_k
953
- ctx.softmax_scale = softmax_scale
954
- ctx.causal = causal
955
- ctx.window_size = window_size
956
- ctx.softcap = softcap
957
- ctx.alibi_slopes = alibi_slopes
958
- ctx.deterministic = deterministic
959
-
960
- out = out_padded[..., :head_size_og]
961
- return out if not return_softmax else (out, softmax_lse, S_dmask)
962
-
963
- @staticmethod
964
- def backward(ctx, dout, *args):
965
- q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
966
- dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
967
- head_size_og = dout.size(2)
968
- dout_padded = dout
969
- if head_size_og % 8 != 0:
970
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
971
- _wrapped_flash_attn_varlen_backward(
972
- dout_padded,
973
- q,
974
- k,
975
- v,
976
- out,
977
- softmax_lse,
978
- dq,
979
- dk,
980
- dv,
981
- cu_seqlens_q,
982
- cu_seqlens_k,
983
- ctx.max_seqlen_q,
984
- ctx.max_seqlen_k,
985
- ctx.dropout_p,
986
- ctx.softmax_scale,
987
- ctx.causal,
988
- ctx.window_size[0],
989
- ctx.window_size[1],
990
- ctx.softcap,
991
- ctx.alibi_slopes,
992
- ctx.deterministic,
993
- rng_state=rng_state,
994
- )
995
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
996
- dk = dk[..., : dout.shape[-1]]
997
- dv = dv[..., : dout.shape[-1]]
998
- return dq, dk, dv, None, None, None, None, None, None, None, None, None, None, None, None, None, None
999
-
1000
-
1001
- def flash_attn_qkvpacked_func(
1002
- qkv,
1003
- dropout_p=0.0,
1004
- softmax_scale=None,
1005
- causal=False,
1006
- window_size=(-1, -1), # -1 means infinite context window
1007
- softcap=0.0, # 0.0 means deactivated
1008
- alibi_slopes=None,
1009
- deterministic=False,
1010
- return_attn_probs=False,
1011
- ):
1012
- """dropout_p should be set to 0.0 during evaluation
1013
- If Q, K, V are already stacked into 1 tensor, this function will be faster than
1014
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
1015
- of the gradients of Q, K, V.
1016
- For multi-query and grouped-query attention (MQA/GQA), please see
1017
- flash_attn_kvpacked_func and flash_attn_func.
1018
-
1019
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1020
- will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
1021
-
1022
- Arguments:
1023
- qkv: (batch_size, seqlen, 3, nheads, headdim)
1024
- dropout_p: float. Dropout probability.
1025
- softmax_scale: float. The scaling of QK^T before applying softmax.
1026
- Default to 1 / sqrt(headdim).
1027
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1028
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1029
- softcap: float. Anything > 0 activates softcapping attention.
1030
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|) is added to
1031
- the attention score of query i and key j.
1032
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1033
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1034
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1035
- testing only. The returned probabilities are not guaranteed to be correct
1036
- (they might not have the right scaling).
1037
- Return:
1038
- out: (batch_size, seqlen, nheads, headdim).
1039
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
1040
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1041
- normalization factor).
1042
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1043
- The output of softmax (possibly with different scaling). It also encodes the dropout
1044
- pattern (negative means that location was dropped, nonnegative means it was kept).
1045
- """
1046
- return FlashAttnQKVPackedFunc.apply(
1047
- qkv,
1048
- dropout_p,
1049
- softmax_scale,
1050
- causal,
1051
- window_size,
1052
- softcap,
1053
- alibi_slopes,
1054
- deterministic,
1055
- return_attn_probs,
1056
- torch.is_grad_enabled(),
1057
- )
1058
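For reference, a minimal usage sketch of the packed-QKV path documented above. It assumes this build is importable as `flash_attn` and re-exports `flash_attn_qkvpacked_func` at the top level (as the upstream flash-attention package does), and that a CUDA device with fp16 support is available; the shapes are illustrative only.

import torch
from flash_attn import flash_attn_qkvpacked_func  # assumed top-level export

batch, seqlen, nheads, headdim = 2, 128, 8, 64
# One packed (batch, seqlen, 3, nheads, headdim) tensor instead of separate Q, K, V.
qkv = torch.randn(batch, seqlen, 3, nheads, headdim,
                  device="cuda", dtype=torch.float16, requires_grad=True)

out = flash_attn_qkvpacked_func(qkv, dropout_p=0.0, causal=True)
out.sum().backward()              # backward produces one dqkv tensor, no concatenation
print(out.shape, qkv.grad.shape)  # (2, 128, 8, 64) (2, 128, 3, 8, 64)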
-
1059
-
1060
- def flash_attn_kvpacked_func(
1061
- q,
1062
- kv,
1063
- dropout_p=0.0,
1064
- softmax_scale=None,
1065
- causal=False,
1066
- window_size=(-1, -1), # -1 means infinite context window
1067
- softcap=0.0, # 0.0 means deactivated
1068
- alibi_slopes=None,
1069
- deterministic=False,
1070
- return_attn_probs=False,
1071
- ):
1072
- """dropout_p should be set to 0.0 during evaluation
1073
- If K, V are already stacked into 1 tensor, this function will be faster than
1074
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
1075
- of the gradients of K, V.
1076
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1077
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1078
- For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
1079
- 0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
1080
-
1081
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1082
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1083
- 1 1 1 1 0
1084
- 1 1 1 1 1
1085
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1086
- 0 0
1087
- 0 0
1088
- 0 0
1089
- 1 0
1090
- 1 1
1091
- If the row of the mask is all zero, the output will be zero.
1092
-
1093
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1094
- will only attend to keys between
1095
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1096
-
1097
- Arguments:
1098
- q: (batch_size, seqlen, nheads, headdim)
1099
- kv: (batch_size, seqlen, 2, nheads_k, headdim)
1100
- dropout_p: float. Dropout probability.
1101
- softmax_scale: float. The scaling of QK^T before applying softmax.
1102
- Default to 1 / sqrt(headdim).
1103
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1104
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1105
- softcap: float. Anything > 0 activates softcapping attention.
1106
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1107
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1108
- is added to the attention score of query i and key j.
1109
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1110
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1111
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1112
- testing only. The returned probabilities are not guaranteed to be correct
1113
- (they might not have the right scaling).
1114
- Return:
1115
- out: (batch_size, seqlen, nheads, headdim).
1116
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
1117
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1118
- normalization factor).
1119
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1120
- The output of softmax (possibly with different scaling). It also encodes the dropout
1121
- pattern (negative means that location was dropped, nonnegative means it was kept).
1122
- """
1123
- return FlashAttnKVPackedFunc.apply(
1124
- q,
1125
- kv,
1126
- dropout_p,
1127
- softmax_scale,
1128
- causal,
1129
- window_size,
1130
- softcap,
1131
- alibi_slopes,
1132
- deterministic,
1133
- return_attn_probs,
1134
- torch.is_grad_enabled(),
1135
- )
1136
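A short MQA/GQA sketch for the packed-KV variant above, under the same assumptions as before (importable as `flash_attn`, CUDA device, fp16, illustrative shapes).

import torch
from flash_attn import flash_attn_kvpacked_func  # assumed top-level export

batch, seqlen, nheads, nheads_k, headdim = 2, 128, 8, 2, 64
q = torch.randn(batch, seqlen, nheads, headdim, device="cuda", dtype=torch.float16)
kv = torch.randn(batch, seqlen, 2, nheads_k, headdim, device="cuda", dtype=torch.float16)  # K and V packed

out = flash_attn_kvpacked_func(q, kv, causal=True)
print(out.shape)  # (2, 128, 8, 64)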
-
1137
-
1138
- def flash_attn_func(
1139
- q,
1140
- k,
1141
- v,
1142
- dropout_p=0.0,
1143
- softmax_scale=None,
1144
- causal=False,
1145
- window_size=(-1, -1), # -1 means infinite context window
1146
- softcap=0.0, # 0.0 means deactivated
1147
- alibi_slopes=None,
1148
- deterministic=False,
1149
- return_attn_probs=False,
1150
- ):
1151
- """dropout_p should be set to 0.0 during evaluation
1152
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1153
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1154
- For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
1155
- 0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
1156
-
1157
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1158
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1159
- 1 1 1 1 0
1160
- 1 1 1 1 1
1161
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1162
- 0 0
1163
- 0 0
1164
- 0 0
1165
- 1 0
1166
- 1 1
1167
- If the row of the mask is all zero, the output will be zero.
1168
-
1169
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1170
- will only attend to keys between
1171
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1172
-
1173
- Arguments:
1174
- q: (batch_size, seqlen, nheads, headdim)
1175
- k: (batch_size, seqlen, nheads_k, headdim)
1176
- v: (batch_size, seqlen, nheads_k, headdim)
1177
- dropout_p: float. Dropout probability.
1178
- softmax_scale: float. The scaling of QK^T before applying softmax.
1179
- Default to 1 / sqrt(headdim).
1180
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1181
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
- softcap: float. Anything > 0 activates softcapping attention.
1182
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1183
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1184
- is added to the attention score of query i and key j.
1185
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1186
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1187
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1188
- testing only. The returned probabilities are not guaranteed to be correct
1189
- (they might not have the right scaling).
1190
- Return:
1191
- out: (batch_size, seqlen, nheads, headdim).
1192
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
1193
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1194
- normalization factor).
1195
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1196
- The output of softmax (possibly with different scaling). It also encodes the dropout
1197
- pattern (negative means that location was dropped, nonnegative means it was kept).
1198
- """
1199
- return FlashAttnFunc.apply(
1200
- q,
1201
- k,
1202
- v,
1203
- dropout_p,
1204
- softmax_scale,
1205
- causal,
1206
- window_size,
1207
- softcap,
1208
- alibi_slopes,
1209
- deterministic,
1210
- return_attn_probs,
1211
- torch.is_grad_enabled(),
1212
- )
1213
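A minimal GQA sketch of the call documented above, assuming this build is importable as `flash_attn` and re-exports `flash_attn_func` (as upstream does), and that a CUDA device with fp16 support is present; the shapes are illustrative.

import torch
from flash_attn import flash_attn_func  # assumed top-level export

batch, seqlen, nheads, nheads_k, headdim = 2, 128, 8, 2, 64  # 8 query heads share 2 KV heads
q = torch.randn(batch, seqlen, nheads, headdim, device="cuda", dtype=torch.float16)
k = torch.randn(batch, seqlen, nheads_k, headdim, device="cuda", dtype=torch.float16)
v = torch.randn(batch, seqlen, nheads_k, headdim, device="cuda", dtype=torch.float16)

out = flash_attn_func(q, k, v, dropout_p=0.0, causal=True)  # dropout_p stays 0.0 at eval time
print(out.shape)  # (2, 128, 8, 64): the output always has the query head count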
-
1214
-
1215
- def flash_attn_varlen_qkvpacked_func(
1216
- qkv,
1217
- cu_seqlens,
1218
- max_seqlen,
1219
- dropout_p=0.0,
1220
- softmax_scale=None,
1221
- causal=False,
1222
- window_size=(-1, -1), # -1 means infinite context window
1223
- softcap=0.0, # 0.0 means deactivated
1224
- alibi_slopes=None,
1225
- deterministic=False,
1226
- return_attn_probs=False,
1227
- ):
1228
- """dropout_p should be set to 0.0 during evaluation
1229
- If Q, K, V are already stacked into 1 tensor, this function will be faster than
1230
- calling flash_attn_varlen_func on Q, K, V since the backward pass avoids explicit concatenation
1231
- of the gradients of Q, K, V.
1232
- For multi-query and grouped-query attention (MQA/GQA), please see
1233
- flash_attn_varlen_kvpacked_func and flash_attn_varlen_func.
1234
-
1235
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1236
- will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
1237
-
1238
- Arguments:
1239
- qkv: (total, 3, nheads, headdim), where total = total number of tokens in the batch.
1240
- cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1241
- of the sequences in the batch, used to index into qkv.
1242
- max_seqlen: int. Maximum sequence length in the batch.
1243
- dropout_p: float. Dropout probability.
1244
- softmax_scale: float. The scaling of QK^T before applying softmax.
1245
- Default to 1 / sqrt(headdim).
1246
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1247
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1248
- softcap: float. Anything > 0 activates softcapping attention.
1249
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|)
1250
- is added to the attention score of query i and key j.
1251
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1252
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1253
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1254
- testing only. The returned probabilities are not guaranteed to be correct
1255
- (they might not have the right scaling).
1256
- Return:
1257
- out: (total, nheads, headdim).
1258
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
1259
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1260
- normalization factor).
1261
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1262
- The output of softmax (possibly with different scaling). It also encodes the dropout
1263
- pattern (negative means that location was dropped, nonnegative means it was kept).
1264
- """
1265
- return FlashAttnVarlenQKVPackedFunc.apply(
1266
- qkv,
1267
- cu_seqlens,
1268
- max_seqlen,
1269
- dropout_p,
1270
- softmax_scale,
1271
- causal,
1272
- window_size,
1273
- softcap,
1274
- alibi_slopes,
1275
- deterministic,
1276
- return_attn_probs,
1277
- torch.is_grad_enabled(),
1278
- )
1279
-
1280
-
1281
- def flash_attn_varlen_kvpacked_func(
1282
- q,
1283
- kv,
1284
- cu_seqlens_q,
1285
- cu_seqlens_k,
1286
- max_seqlen_q,
1287
- max_seqlen_k,
1288
- dropout_p=0.0,
1289
- softmax_scale=None,
1290
- causal=False,
1291
- window_size=(-1, -1), # -1 means infinite context window
1292
- softcap=0.0, # 0.0 means deactivated
1293
- alibi_slopes=None,
1294
- deterministic=False,
1295
- return_attn_probs=False,
1296
- ):
1297
- """dropout_p should be set to 0.0 during evaluation
1298
- If K, V are already stacked into 1 tensor, this function will be faster than
1299
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
1300
- of the gradients of K, V.
1301
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1302
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1303
- For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
1304
- 0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
1305
-
1306
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1307
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1308
- 1 1 1 1 0
1309
- 1 1 1 1 1
1310
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1311
- 0 0
1312
- 0 0
1313
- 0 0
1314
- 1 0
1315
- 1 1
1316
- If the row of the mask is all zero, the output will be zero.
1317
-
1318
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1319
- will only attend to keys between
1320
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1321
-
1322
- Arguments:
1323
- q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
1324
- kv: (total_k, 2, nheads_k, headdim), where total_k = total number of key tokens in the batch.
1325
- cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1326
- of the sequences in the batch, used to index into q.
1327
- cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1328
- of the sequences in the batch, used to index into kv.
1329
- max_seqlen_q: int. Maximum query sequence length in the batch.
1330
- max_seqlen_k: int. Maximum key sequence length in the batch.
1331
- dropout_p: float. Dropout probability.
1332
- softmax_scale: float. The scaling of QK^T before applying softmax.
1333
- Default to 1 / sqrt(headdim).
1334
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1335
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1336
- softcap: float. Anything > 0 activates softcapping attention.
1337
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1338
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1339
- is added to the attention score of query i and key j.
1340
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1341
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1342
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1343
- testing only. The returned probabilities are not guaranteed to be correct
1344
- (they might not have the right scaling).
1345
- Return:
1346
- out: (total, nheads, headdim).
1347
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
1348
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1349
- normalization factor).
1350
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1351
- The output of softmax (possibly with different scaling). It also encodes the dropout
1352
- pattern (negative means that location was dropped, nonnegative means it was kept).
1353
- """
1354
- return FlashAttnVarlenKVPackedFunc.apply(
1355
- q,
1356
- kv,
1357
- cu_seqlens_q,
1358
- cu_seqlens_k,
1359
- max_seqlen_q,
1360
- max_seqlen_k,
1361
- dropout_p,
1362
- softmax_scale,
1363
- causal,
1364
- window_size,
1365
- softcap,
1366
- alibi_slopes,
1367
- deterministic,
1368
- return_attn_probs,
1369
- torch.is_grad_enabled(),
1370
- )
1371
-
1372
-
1373
- def flash_attn_varlen_func(
1374
- q,
1375
- k,
1376
- v,
1377
- cu_seqlens_q,
1378
- cu_seqlens_k,
1379
- max_seqlen_q,
1380
- max_seqlen_k,
1381
- dropout_p=0.0,
1382
- softmax_scale=None,
1383
- causal=False,
1384
- window_size=(-1, -1), # -1 means infinite context window
1385
- softcap=0.0, # 0.0 means deactivated
1386
- alibi_slopes=None,
1387
- deterministic=False,
1388
- return_attn_probs=False,
1389
- block_table=None,
1390
- ):
1391
- """dropout_p should be set to 0.0 during evaluation
1392
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in K, V with fewer heads
1393
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1394
- For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
1395
- 0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
1396
-
1397
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1398
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1399
- 1 1 1 1 0
1400
- 1 1 1 1 1
1401
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1402
- 0 0
1403
- 0 0
1404
- 0 0
1405
- 1 0
1406
- 1 1
1407
- If the row of the mask is all zero, the output will be zero.
1408
-
1409
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1410
- will only attend to keys between
1411
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1412
-
1413
- Arguments:
1414
- q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
1415
- k: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
1416
- v: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
1417
- cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1418
- of the sequences in the batch, used to index into q.
1419
- cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1420
- of the sequences in the batch, used to index into kv.
1421
- max_seqlen_q: int. Maximum query sequence length in the batch.
1422
- max_seqlen_k: int. Maximum key sequence length in the batch.
1423
- dropout_p: float. Dropout probability.
1424
- softmax_scale: float. The scaling of QK^T before applying softmax.
1425
- Default to 1 / sqrt(headdim).
1426
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1427
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1428
- softcap: float. Anything > 0 activates softcapping attention.
1429
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1430
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1431
- is added to the attention score of query i and key j.
1432
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1433
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1434
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1435
- testing only. The returned probabilities are not guaranteed to be correct
1436
- (they might not have the right scaling).
1437
- Return:
1438
- out: (total, nheads, headdim).
1439
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
1440
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1441
- normalization factor).
1442
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1443
- The output of softmax (possibly with different scaling). It also encodes the dropout
1444
- pattern (negative means that location was dropped, nonnegative means it was kept).
1445
- """
1446
- return FlashAttnVarlenFunc.apply(
1447
- q,
1448
- k,
1449
- v,
1450
- cu_seqlens_q,
1451
- cu_seqlens_k,
1452
- max_seqlen_q,
1453
- max_seqlen_k,
1454
- dropout_p,
1455
- softmax_scale,
1456
- causal,
1457
- window_size,
1458
- softcap,
1459
- alibi_slopes,
1460
- deterministic,
1461
- return_attn_probs,
1462
- block_table,
1463
- torch.is_grad_enabled(),
1464
- )
1465
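A sketch of the varlen path documented above: two sequences of different lengths are packed along one total-token dimension and described by cu_seqlens prefix sums. It assumes the `flash_attn` top-level export, a CUDA device, and fp16; the numbers are illustrative.

import torch
from flash_attn import flash_attn_varlen_func  # assumed top-level export

nheads, headdim = 4, 64
seqlens = [5, 11]                                # two sequences packed together
cu_seqlens = torch.tensor([0, 5, 16], device="cuda", dtype=torch.int32)  # prefix sums of seqlens
total = sum(seqlens)

q = torch.randn(total, nheads, headdim, device="cuda", dtype=torch.float16)
k = torch.randn(total, nheads, headdim, device="cuda", dtype=torch.float16)
v = torch.randn(total, nheads, headdim, device="cuda", dtype=torch.float16)

out = flash_attn_varlen_func(
    q, k, v,
    cu_seqlens_q=cu_seqlens, cu_seqlens_k=cu_seqlens,
    max_seqlen_q=max(seqlens), max_seqlen_k=max(seqlens),
    causal=True,
)
print(out.shape)  # (16, 4, 64)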
-
1466
-
1467
- def flash_attn_with_kvcache(
1468
- q,
1469
- k_cache,
1470
- v_cache,
1471
- k=None,
1472
- v=None,
1473
- rotary_cos=None,
1474
- rotary_sin=None,
1475
- cache_seqlens: Optional[Union[int, torch.Tensor]] = None,
1476
- cache_batch_idx: Optional[torch.Tensor] = None,
1477
- cache_leftpad: Optional[torch.Tensor] = None,
1478
- block_table: Optional[torch.Tensor] = None,
1479
- softmax_scale=None,
1480
- causal=False,
1481
- window_size=(-1, -1), # -1 means infinite context window
1482
- softcap=0.0, # 0.0 means deactivated
1483
- rotary_interleaved=True,
1484
- alibi_slopes=None,
1485
- num_splits=0,
1486
- return_softmax_lse=False,
1487
- ):
1488
- """
1489
- If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
1490
- k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
1491
- the previous step, and update them with the new keys/values from the current step, and do
1492
- attention with the updated cache, all in 1 kernel.
1493
-
1494
- If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
1495
- For example, the KV cache could be pre-allocated with the max sequence length, and you can use
1496
- cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.
1497
-
1498
- Also apply rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be
1499
- rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
1500
- If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
1501
- and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
1502
- If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
1503
- indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).
1504
-
1505
- See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.
1506
-
1507
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1508
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1509
- For example, if Q has 6 heads and K, V have 2 heads, heads 0, 1, 2 of Q will attend to head
1510
- 0 of K, V, and heads 3, 4, 5 of Q will attend to head 1 of K, V.
1511
-
1512
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1513
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1514
- 1 1 1 1 0
1515
- 1 1 1 1 1
1516
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1517
- 0 0
1518
- 0 0
1519
- 0 0
1520
- 1 0
1521
- 1 1
1522
- If the row of the mask is all zero, the output will be zero.
1523
-
1524
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1525
- will only attend to keys between
1526
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1527
-
1528
- Note: Does not support backward pass.
1529
-
1530
- Arguments:
1531
- q: (batch_size, seqlen, nheads, headdim)
1532
- k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
1533
- or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
1534
- page_block_size must be a multiple of 256.
1535
- v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
1536
- or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
1537
- k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
1538
- k with k_cache, starting at the indices specified by cache_seqlens.
1539
- v [optional]: (batch_size, seqlen_new, nheads_k, headdim). Similar to k.
1540
- rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
1541
- to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
1542
- rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
1543
- cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
1544
- KV cache.
1545
- cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
1546
- If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1].
1547
- If the indices are not distinct, and k and v are provided, the values updated in the cache
1548
- might come from any of the duplicate indices.
1549
- cache_leftpad: (batch_size,), dtype torch.int32. The index that the KV cache starts. If None, assume 0.
1550
- block_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
1551
- softmax_scale: float. The scaling of QK^T before applying softmax.
1552
- Default to 1 / sqrt(headdim).
1553
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1554
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1555
- softcap: float. Anything > 0 activates softcapping attention.
1556
- rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
1557
- If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
1558
- rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
1559
- (i.e. GPT-NeoX style).
1560
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1561
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1562
- is added to the attention score of query i and key j.
1563
- num_splits: int. If > 1, split the key/value into this many chunks along the sequence.
1564
- If num_splits == 1, we don't split the key/value. If num_splits == 0, we use a heuristic
1565
- to automatically determine the number of splits.
1566
- Don't change this unless you know what you are doing.
1567
- return_softmax_lse: bool. Whether to return the logsumexp of the attention scores.
1568
-
1569
- Return:
1570
- out: (batch_size, seqlen, nheads, headdim).
1571
- softmax_lse [optional, if return_softmax_lse=True]: (batch_size, nheads, seqlen). The
1572
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1573
- normalization factor).
1574
- """
1575
- assert k_cache.stride(-1) == 1, "k_cache must have contiguous last dimension"
1576
- assert v_cache.stride(-1) == 1, "v_cache must have contiguous last dimension"
1577
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
1578
- if softmax_scale is None:
1579
- softmax_scale = q.shape[-1] ** (-0.5)
1580
- if cache_seqlens is not None and isinstance(cache_seqlens, int):
1581
- cache_seqlens = torch.full(
1582
- (k_cache.shape[0],), cache_seqlens, dtype=torch.int32, device=k_cache.device
1583
- )
1584
- cache_seqlens = maybe_contiguous(cache_seqlens)
1585
- cache_batch_idx = maybe_contiguous(cache_batch_idx)
1586
- block_table = maybe_contiguous(block_table)
1587
- out, softmax_lse = flash_attn_gpu.fwd_kvcache(
1588
- q,
1589
- k_cache,
1590
- v_cache,
1591
- k,
1592
- v,
1593
- cache_seqlens,
1594
- rotary_cos,
1595
- rotary_sin,
1596
- cache_batch_idx,
1597
- cache_leftpad,
1598
- block_table,
1599
- alibi_slopes,
1600
- None,
1601
- softmax_scale,
1602
- causal,
1603
- window_size[0],
1604
- window_size[1],
1605
- softcap,
1606
- rotary_interleaved,
1607
- num_splits,
1608
- )
1609
- return (out, softmax_lse) if return_softmax_lse else out
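To make the incremental-decoding workflow above concrete, a short sketch that pre-allocates the cache and appends one token per step. It assumes the `flash_attn` top-level export, a CUDA device, and fp16; the shapes and step count are illustrative only.

import torch
from flash_attn import flash_attn_with_kvcache  # assumed top-level export

batch, nheads, nheads_k, headdim, max_seqlen = 2, 8, 8, 64, 256
k_cache = torch.zeros(batch, max_seqlen, nheads_k, headdim, device="cuda", dtype=torch.float16)
v_cache = torch.zeros_like(k_cache)
cache_seqlens = torch.zeros(batch, dtype=torch.int32, device="cuda")  # current length per sequence

for _ in range(4):  # a few decode steps
    q = torch.randn(batch, 1, nheads, headdim, device="cuda", dtype=torch.float16)
    k_new = torch.randn(batch, 1, nheads_k, headdim, device="cuda", dtype=torch.float16)
    v_new = torch.randn(batch, 1, nheads_k, headdim, device="cuda", dtype=torch.float16)
    # k_cache / v_cache are updated in place at the positions given by cache_seqlens.
    out = flash_attn_with_kvcache(
        q, k_cache, v_cache, k=k_new, v=v_new,
        cache_seqlens=cache_seqlens, causal=True,
    )
    cache_seqlens += 1  # the new token is now part of the cache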
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/layers/__init__.py DELETED
File without changes
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/layers/patch_embed.py DELETED
@@ -1,67 +0,0 @@
1
- # We use the same API as https://github.com/rwightman/pytorch-image-models/blob/v0.6.11/timm/models/layers/patch_embed.py
2
- # But we use nn.Linear instead of Conv2d and it's about 8x faster.
3
-
4
- from functools import partial
5
-
6
- import torch.nn as nn
7
- from einops import rearrange
8
- from torch import _assert
9
- from torch.nn.modules.utils import _pair
10
-
11
- try:
12
- from flash_attn.ops.fused_dense import FusedDense
13
- except ImportError:
14
- FusedDense = None
15
-
16
-
17
- class PatchEmbed(nn.Module):
18
- """2D Image to Patch Embedding"""
19
-
20
- def __init__(
21
- self,
22
- img_size=224,
23
- patch_size=16,
24
- in_chans=3,
25
- embed_dim=768,
26
- norm_layer=None,
27
- flatten=True,
28
- bias=True,
29
- fused_bias_fc=False,
30
- ):
31
- super().__init__()
32
- img_size = _pair(img_size)
33
- patch_size = _pair(patch_size)
34
- self.img_size = img_size
35
- self.patch_size = patch_size
36
- self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
37
- self.num_patches = self.grid_size[0] * self.grid_size[1]
38
- self.flatten = flatten
39
- if fused_bias_fc and FusedDense is None:
40
- raise ImportError("fused_dense is not installed")
41
-
42
- linear_cls = nn.Linear if not fused_bias_fc or not bias else FusedDense
43
- self.proj = linear_cls(in_chans * patch_size[0] * patch_size[1], embed_dim, bias=bias)
44
- self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
45
-
46
- def forward(self, x):
47
- _, _, H, W = x.shape
48
- _assert(
49
- H == self.img_size[0],
50
- f"Input image height ({H}) doesn't match model ({self.img_size[0]}).",
51
- )
52
- _assert(
53
- W == self.img_size[1],
54
- f"Input image width ({W}) doesn't match model ({self.img_size[1]}).",
55
- )
56
- x = self.proj(
57
- rearrange(
58
- x,
59
- "b c (h p1) (w p2) -> b h w (c p1 p2)",
60
- p1=self.patch_size[0],
61
- p2=self.patch_size[1],
62
- )
63
- )
64
- if self.flatten:
65
- x = rearrange(x, "b h w c -> b (h w) c")
66
- x = self.norm(x)
67
- return x
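A small CPU-only sketch of the module above: images are cut into 16x16 patches with einops and projected by a single nn.Linear (FusedDense is only involved when fused_bias_fc=True). The import path is an assumption; the sizes are the usual timm defaults.

import torch
from flash_attn.layers.patch_embed import PatchEmbed  # assumed import path

embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
x = torch.randn(2, 3, 224, 224)
tokens = embed(x)
print(tokens.shape)  # (2, 196, 768): 196 = (224 // 16) ** 2 patches per image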
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/layers/rotary.py DELETED
@@ -1,483 +0,0 @@
1
- # Copyright (c) 2025, Tri Dao
2
-
3
- import math
4
- from functools import partial
5
- from typing import Optional, Tuple, Union
6
-
7
- import torch
8
- from torch import Tensor
9
-
10
- from einops import rearrange, repeat
11
- # from flash_attn.ops.triton.rotary import apply_rotary
12
- from ..ops.triton.rotary import apply_rotary
13
-
14
-
15
- def rotate_half(x, interleaved=False):
16
- if not interleaved:
17
- x1, x2 = x.chunk(2, dim=-1)
18
- return torch.cat((-x2, x1), dim=-1)
19
- else:
20
- x1, x2 = x[..., ::2], x[..., 1::2]
21
- return rearrange(torch.stack((-x2, x1), dim=-1), "... d two -> ... (d two)", two=2)
22
-
23
-
24
- def apply_rotary_emb_torch(x, cos, sin, interleaved=False):
25
- """
26
- x: (batch_size, seqlen, nheads, headdim)
27
- cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2)
28
- """
29
- ro_dim = cos.shape[-1] * 2
30
- assert ro_dim <= x.shape[-1]
31
- cos = repeat(cos, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)")
32
- sin = repeat(sin, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)")
33
- return torch.cat(
34
- [x[..., :ro_dim] * cos + rotate_half(x[..., :ro_dim], interleaved) * sin, x[..., ro_dim:]],
35
- dim=-1,
36
- )
37
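A small, CPU-only sketch of what this pure-PyTorch reference computes: build the cos/sin tables the same way the RotaryEmbedding module further below does, rotate the first rotary_dim dimensions, and leave the rest untouched. The import path and sizes are assumptions for illustration.

import torch
from flash_attn.layers.rotary import apply_rotary_emb_torch  # assumed import path

batch, seqlen, nheads, headdim, rotary_dim = 2, 16, 4, 64, 32
x = torch.randn(batch, seqlen, nheads, headdim)

# cos/sin tables: inv_freq over even dims, outer product with the positions.
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, rotary_dim, 2, dtype=torch.float32) / rotary_dim))
freqs = torch.outer(torch.arange(seqlen, dtype=torch.float32), inv_freq)  # (seqlen, rotary_dim / 2)
x_rot = apply_rotary_emb_torch(x, torch.cos(freqs), torch.sin(freqs))

# Only the first rotary_dim dimensions are rotated; the rest pass through unchanged.
assert x_rot.shape == x.shape
assert torch.equal(x_rot[..., rotary_dim:], x[..., rotary_dim:])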
-
38
-
39
- class ApplyRotaryEmb(torch.autograd.Function):
40
- @staticmethod
41
- def forward(
42
- ctx,
43
- x,
44
- cos,
45
- sin,
46
- interleaved=False,
47
- inplace=False,
48
- seqlen_offsets: Union[int, Tensor] = 0,
49
- cu_seqlens: Optional[Tensor] = None,
50
- max_seqlen: Optional[int] = None,
51
- ):
52
- out = apply_rotary(
53
- x,
54
- cos,
55
- sin,
56
- seqlen_offsets=seqlen_offsets,
57
- cu_seqlens=cu_seqlens,
58
- max_seqlen=max_seqlen,
59
- interleaved=interleaved,
60
- inplace=inplace,
61
- )
62
- if isinstance(seqlen_offsets, int):
63
- ctx.save_for_backward(cos, sin, cu_seqlens) # Can't save int with save_for_backward
64
- ctx.seqlen_offsets = seqlen_offsets
65
- else:
66
- ctx.save_for_backward(cos, sin, cu_seqlens, seqlen_offsets)
67
- ctx.seqlen_offsets = None
68
- ctx.interleaved = interleaved
69
- ctx.inplace = inplace
70
- ctx.max_seqlen = max_seqlen
71
- return out if not inplace else x
72
-
73
- @staticmethod
74
- def backward(ctx, do):
75
- seqlen_offsets = ctx.seqlen_offsets
76
- if seqlen_offsets is None:
77
- cos, sin, cu_seqlens, seqlen_offsets = ctx.saved_tensors
78
- else:
79
- cos, sin, cu_seqlens = ctx.saved_tensors
80
- dx = apply_rotary(
81
- do,
82
- cos,
83
- sin,
84
- seqlen_offsets=seqlen_offsets,
85
- cu_seqlens=cu_seqlens,
86
- max_seqlen=ctx.max_seqlen,
87
- interleaved=ctx.interleaved,
88
- inplace=ctx.inplace,
89
- conjugate=True,
90
- )
91
- return dx, None, None, None, None, None, None, None
92
-
93
-
94
- def apply_rotary_emb(
95
- x,
96
- cos,
97
- sin,
98
- interleaved=False,
99
- inplace=False,
100
- seqlen_offsets: Union[int, Tensor] = 0,
101
- cu_seqlens: Optional[Tensor] = None,
102
- max_seqlen: Optional[int] = None,
103
- ):
104
- """
105
- Arguments:
106
- x: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None
107
- else (total_seqlen, nheads, headdim)
108
- cos, sin: (seqlen_rotary, rotary_dim / 2)
109
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead
110
- of 1st half and 2nd half (GPT-NeoX style).
111
- inplace: if True, apply rotary embedding in-place.
112
- seqlen_offsets: (batch_size,) or int. Each sequence in x is shifted by this amount.
113
- Most commonly used in inference when we have KV cache.
114
- cu_seqlens: (batch + 1,) or None
115
- max_seqlen: int
116
- Return:
117
- out: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None
118
- else (total_seqlen, nheads, headdim)
119
- rotary_dim must be <= headdim
120
- Apply rotary embedding to the first rotary_dim of x.
121
- """
122
- return ApplyRotaryEmb.apply(
123
- x, cos, sin, interleaved, inplace, seqlen_offsets, cu_seqlens, max_seqlen
124
- )
125
-
126
-
127
- # For backward compatibility
128
- apply_rotary_emb_func = apply_rotary_emb
129
-
130
-
131
- def _apply_rotary_emb_qkv(
132
- qkv,
133
- cos,
134
- sin,
135
- cos_k=None,
136
- sin_k=None,
137
- interleaved=False,
138
- inplace=False,
139
- conjugate=False,
140
- seqlen_offsets: Union[int, Tensor] = 0,
141
- num_heads_q: Optional[int] = None,
142
- ):
143
- apply_rotary_fn = partial(
144
- apply_rotary,
145
- interleaved=interleaved,
146
- inplace=inplace,
147
- conjugate=conjugate,
148
- seqlen_offsets=seqlen_offsets
149
- )
150
- if cos_k is None and sin_k is None and qkv.is_contiguous():
151
- # Call 1 kernel instead of 2 kernels
152
- # We need qkv to be contiguous so that when we reshape to combine (3, nheads)
153
- # dimensions, we get the same tensor
154
- if qkv.dim() == 5:
155
- batch, seqlen, three, nheads, headdim = qkv.shape
156
- assert three == 3
157
- # qk = rearrange(qkv[:, :, :2], "b s t h d -> b s (t h) d")
158
- qk = qkv[:, :, :2].reshape(batch, seqlen, -1, headdim)
159
- qk = apply_rotary_fn(qk, cos, sin)
160
- else:
161
- assert qkv.dim() == 4
162
- assert num_heads_q is not None
163
- num_heads_k = (qkv.shape[2] - num_heads_q) // 2
164
- assert qkv.shape[2] == num_heads_q + 2 * num_heads_k
165
- qk = qkv[:, :, :num_heads_q + num_heads_k]
166
- qk = apply_rotary_fn(qk, cos, sin)
167
- if not inplace:
168
- if qkv.dim() == 5:
169
- qkv = torch.cat([rearrange(qk, "b s (t h) d -> b s t h d", t=2), qkv[:, :, 2:]], dim=2)
170
- else:
171
- qkv = torch.cat([qk, qkv[:, :, num_heads_q + num_heads_k :]], dim=2)
172
- else:
173
- cos_k = cos if cos_k is None else cos_k
174
- sin_k = sin if sin_k is None else sin_k
175
- if qkv.dim() == 5:
176
- batch, seqlen, three, nheads, headdim = qkv.shape
177
- assert three == 3
178
- q, k = qkv[:, :, 0], qkv[:, :, 1]
179
- else:
180
- assert qkv.dim() == 4
181
- assert num_heads_q is not None
182
- num_heads_k = (qkv.shape[2] - num_heads_q) // 2
183
- assert qkv.shape[2] == num_heads_q + 2 * num_heads_k
184
- q, k = qkv[:, :, :num_heads_q], qkv[:, :, num_heads_q : num_heads_q + num_heads_k]
185
- q = apply_rotary_fn(q, cos, sin)
186
- k = apply_rotary_fn(k, cos_k, sin_k)
187
- if not inplace:
188
- if qkv.dim() == 5:
189
- qkv = torch.stack([q, k, qkv[:, :, 2]], dim=2)
190
- else:
191
- qkv = torch.cat([q, k, qkv[:, :, num_heads_q + num_heads_k:]], dim=2)
192
- return qkv
193
-
194
-
195
- class ApplyRotaryEmbQKV_(torch.autograd.Function):
196
- @staticmethod
197
- def forward(
198
- ctx,
199
- qkv,
200
- cos,
201
- sin,
202
- cos_k=None,
203
- sin_k=None,
204
- interleaved=False,
205
- seqlen_offsets: Union[int, torch.Tensor] = 0,
206
- num_heads_q: Optional[int] = None,
207
- ):
208
- # apply_rotary_emb_qkv_inplace(
209
- qkv = _apply_rotary_emb_qkv(
210
- qkv, cos, sin, cos_k, sin_k, interleaved=interleaved, inplace=True,
211
- seqlen_offsets=seqlen_offsets, num_heads_q=num_heads_q,
212
- )
213
- if isinstance(seqlen_offsets, int):
214
- ctx.save_for_backward(cos, sin, cos_k, sin_k)
215
- ctx.seqlen_offsets = seqlen_offsets
216
- else:
217
- ctx.save_for_backward(cos, sin, cos_k, sin_k, seqlen_offsets)
218
- ctx.seqlen_offsets = None
219
- ctx.interleaved = interleaved
220
- ctx.num_heads_q = num_heads_q
221
- return qkv
222
-
223
- @staticmethod
224
- def backward(ctx, dqkv):
225
- seqlen_offsets = ctx.seqlen_offsets
226
- if seqlen_offsets is None:
227
- cos, sin, cos_k, sin_k, seqlen_offsets = ctx.saved_tensors
228
- else:
229
- cos, sin, cos_k, sin_k = ctx.saved_tensors
230
- dqkv = _apply_rotary_emb_qkv(
231
- dqkv, cos, sin, cos_k, sin_k, interleaved=ctx.interleaved, inplace=True,
232
- seqlen_offsets=seqlen_offsets, num_heads_q=ctx.num_heads_q, conjugate=True,
233
- )
234
- return dqkv, None, None, None, None, None, None, None
235
-
236
-
237
- def apply_rotary_emb_qkv_(
238
- qkv,
239
- cos,
240
- sin,
241
- cos_k=None,
242
- sin_k=None,
243
- interleaved=False,
244
- seqlen_offsets: Union[int, torch.Tensor] = 0,
245
- num_heads_q: Optional[int] = None,
246
- ):
247
- """
248
- Arguments:
249
- qkv: (batch_size, seqlen, 3, nheads, headdim) or (batch_size, seqlen, num_heads_q + 2 * num_heads_k, headdim).
250
- If qkv has shape (batch_size, seqlen, num_heads_q + 2 * num_heads_k, headdim) (e.g. MQA / GQA),
251
- then num_heads_q must be provided.
252
- cos, sin: (seqlen, rotary_dim / 2)
253
- cos_k, sin_k: (seqlen, rotary_dim / 2), optional
254
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead of
255
- 1st half and 2nd half (GPT-NeoX style).
256
- seqlen_offsets: (batch_size,) or int. Each sequence in Q and K is shifted by this amount.
257
- Most commonly used in inference when we have KV cache.
258
- Return:
259
- qkv: (batch_size, seqlen, 3, nheads, headdim) or (batch_size, seqlen, num_heads_q + 2 * num_heads_k, headdim)
260
- rotary_dim must be <= headdim
261
- Apply rotary embedding *inplace* to the first rotary_dim of Q and K.
262
- """
263
- return ApplyRotaryEmbQKV_.apply(
264
- qkv, cos, sin, cos_k, sin_k, interleaved, seqlen_offsets, num_heads_q
265
- )
266
-
267
-
268
- class ApplyRotaryEmbKV_(torch.autograd.Function):
269
-
270
- @staticmethod
271
- def forward(ctx, kv, cos, sin, interleaved=False, seqlen_offsets: Union[int, torch.Tensor] = 0):
272
- batch, seqlen, two, nheads, headdim = kv.shape
273
- assert two == 2
274
- k = kv[:, :, 0]
275
- apply_rotary(
276
- k, cos, sin, seqlen_offsets=seqlen_offsets, interleaved=interleaved, inplace=True
277
- )
278
- if isinstance(seqlen_offsets, int):
279
- ctx.save_for_backward(cos, sin) # Can't save int with save_for_backward
280
- ctx.seqlen_offsets = seqlen_offsets
281
- else:
282
- ctx.save_for_backward(cos, sin, seqlen_offsets)
283
- ctx.seqlen_offsets = None
284
- ctx.interleaved = interleaved
285
- return kv
286
-
287
- @staticmethod
288
- def backward(ctx, dkv):
289
- seqlen_offsets = ctx.seqlen_offsets
290
- if seqlen_offsets is None:
291
- cos, sin, seqlen_offsets = ctx.saved_tensors
292
- else:
293
- cos, sin = ctx.saved_tensors
294
- apply_rotary(
295
- dkv[:, :, 0],
296
- cos,
297
- sin,
298
- seqlen_offsets=seqlen_offsets,
299
- interleaved=ctx.interleaved,
300
- inplace=True,
301
- conjugate=True,
302
- )
303
- return dkv, None, None, None, None
304
-
305
-
306
- apply_rotary_emb_kv_ = ApplyRotaryEmbKV_.apply
307
-
308
-
309
- def apply_rotary_emb_kv_(
310
- kv,
311
- cos,
312
- sin,
313
- interleaved=False,
314
- seqlen_offsets: Union[int, torch.Tensor] = 0,
315
- ):
316
- """
317
- Arguments:
318
- kv: (batch_size, seqlen, 2, nheads, headdim)
319
- cos, sin: (seqlen, rotary_dim / 2)
320
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead of
321
- 1st half and 2nd half (GPT-NeoX style).
322
- seqlen_offsets: (batch_size,) or int. Each sequence in Q and K is shifted by this amount.
323
- Most commonly used in inference when we have KV cache.
324
- Return:
325
- kv: (batch_size, seqlen, 2, nheads, headdim)
326
- rotary_dim must be <= headdim
327
- Apply rotary embedding *inplace* to the first rotary_dim of K.
328
- """
329
- return ApplyRotaryEmbKV_.apply(kv, cos, sin, interleaved, seqlen_offsets)
330
-
331
-
332
- class RotaryEmbedding(torch.nn.Module):
333
- """
334
- The rotary position embeddings from RoFormer_ (Su et. al).
335
- A crucial insight from the method is that the query and keys are
336
- transformed by rotation matrices which depend on the relative positions.
337
-
338
- Other implementations are available in the Rotary Transformer repo_ and in
339
- GPT-NeoX_, GPT-NeoX was an inspiration
340
-
341
- .. _RoFormer: https://arxiv.org/abs/2104.09864
342
- .. _repo: https://github.com/ZhuiyiTechnology/roformer
343
- .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
344
-
345
- If scale_base is not None, this implements XPos (Sun et al., https://arxiv.org/abs/2212.10554).
346
- A recommended value for scale_base is 512: https://github.com/HazyResearch/flash-attention/issues/96
347
- Reference: https://github.com/sunyt32/torchscale/blob/main/torchscale/component/xpos_relative_position.py
348
- """
349
-
350
- def __init__(
351
- self,
352
- dim: int,
353
- base=10000.0,
354
- interleaved=False,
355
- scale_base=None,
356
- device=None,
357
- ):
358
- """
359
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead
360
- of 1st half and 2nd half (GPT-NeoX style).
361
- """
362
- super().__init__()
363
- self.dim = dim
364
- self.base = float(base)
365
- # Generate and save the inverse frequency buffer (non trainable)
366
- inv_freq = self._compute_inv_freq(device)
367
- self.register_buffer("inv_freq", inv_freq, persistent=False)
368
- self.interleaved = interleaved
369
- self.scale_base = scale_base
370
- scale = (
371
- (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim)
372
- if scale_base is not None
373
- else None
374
- )
375
- self.register_buffer("scale", scale, persistent=False)
376
-
377
- self._seq_len_cached = 0
378
- self._cos_cached = None
379
- self._sin_cached = None
380
- self._cos_k_cached = None
381
- self._sin_k_cached = None
382
-
383
- def _compute_inv_freq(self, device=None):
384
- return 1.0 / (
385
- self.base
386
- ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim)
387
- )
388
-
389
- def _update_cos_sin_cache(self, seqlen, device=None, dtype=None):
390
- # Reset the tables if the sequence length has changed,
391
- # if we're on a new device (possibly due to tracing for instance),
392
- # or if we're switching from inference mode to training
393
- if (
394
- seqlen > self._seq_len_cached
395
- or self._cos_cached is None
396
- or self._cos_cached.device != device
397
- or self._cos_cached.dtype != dtype
398
- or (self.training and self._cos_cached.is_inference())
399
- ):
400
- self._seq_len_cached = seqlen
401
- # We want fp32 here, not self.inv_freq.dtype, since the model could be loaded in bf16
402
- # And the output of arange can be quite large, so bf16 would lose a lot of precision.
403
- t = torch.arange(seqlen, device=device, dtype=torch.float32)
404
- # We want fp32 here as well since inv_freq will be multiplied with t, and the output
405
- # will be large. Having it in bf16 will lose a lot of precision and cause the
406
- # cos & sin output to change significantly.
407
- # We want to recompute self.inv_freq if it was not loaded in fp32
408
- if self.inv_freq.dtype != torch.float32:
409
- inv_freq = self._compute_inv_freq(device=device)
410
- else:
411
- inv_freq = self.inv_freq
412
- # Don't do einsum, it converts fp32 to bf16 under AMP
413
- # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
414
- freqs = torch.outer(t, inv_freq)
415
- if self.scale is None:
416
- self._cos_cached = torch.cos(freqs).to(dtype)
417
- self._sin_cached = torch.sin(freqs).to(dtype)
418
- else:
419
- power = (
420
- torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device)
421
- - seqlen // 2
422
- ) / self.scale_base
423
- scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1")
424
- # We want the multiplication by scale to happen in fp32
425
- self._cos_cached = (torch.cos(freqs) * scale).to(dtype)
426
- self._sin_cached = (torch.sin(freqs) * scale).to(dtype)
427
- self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype)
428
- self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype)
429
-
430
- def forward(
431
- self,
432
- qkv: torch.Tensor,
433
- kv: Optional[torch.Tensor] = None,
434
- seqlen_offset: Union[int, torch.Tensor] = 0,
435
- max_seqlen: Optional[int] = None,
436
- num_heads_q: Optional[int] = None,
437
- ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
438
- """
439
- qkv: (batch, seqlen, 3, nheads, headdim) or (batch, seqlen, num_heads_q + 2 * num_heads_k, headdim)
440
- if kv is none, else it's just q of shape (batch, seqlen, nheads, headdim).
441
- If qkv has shape (batch, seqlen, num_heads_q + 2 * num_heads_k, headdim) (e.g. MQA / GQA),
442
- then num_heads_q must be provided.
443
- kv: (batch, seqlen, 2, nheads, headdim)
444
- seqlen_offset: (batch_size,) or int. Each sequence in x is shifted by this amount.
445
- Most commonly used in inference when we have KV cache.
446
- If it's a tensor of shape (batch_size,), then to update the cos / sin cache, one
447
- should pass in max_seqlen, which will update the cos / sin cache up to that length.
448
- Apply rotary embedding *inplace* to qkv and / or kv.
449
- """
450
- seqlen = qkv.shape[1]
451
- if max_seqlen is not None:
452
- self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype)
453
- elif isinstance(seqlen_offset, int):
454
- self._update_cos_sin_cache(seqlen + seqlen_offset, device=qkv.device, dtype=qkv.dtype)
455
- if kv is None:
456
- return apply_rotary_emb_qkv_(
457
- qkv,
458
- self._cos_cached,
459
- self._sin_cached,
460
- self._cos_k_cached if self.scale is not None else None,
461
- self._sin_k_cached if self.scale is not None else None,
462
- interleaved=self.interleaved,
463
- seqlen_offsets=seqlen_offset,
464
- num_heads_q=num_heads_q,
465
- )
466
- else:
467
- q = qkv
468
- q = apply_rotary_emb_func(
469
- q,
470
- self._cos_cached,
471
- self._sin_cached,
472
- interleaved=self.interleaved,
473
- inplace=True,
474
- seqlen_offsets=seqlen_offset,
475
- )
476
- kv = apply_rotary_emb_kv_(
477
- kv,
478
- self._cos_cached if self.scale is None else self._cos_k_cached,
479
- self._sin_cached if self.scale is None else self._sin_k_cached,
480
- interleaved=self.interleaved,
481
- seqlen_offsets=seqlen_offset,
482
- )
483
- return q, kv
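For completeness, a sketch of how the module above is typically driven during training on a packed QKV tensor. It assumes the module is importable as `flash_attn.layers.rotary`, that Triton and a CUDA device are available (the underlying kernel is Triton-based), and that fp16 is acceptable; the shapes are illustrative.

import torch
from flash_attn.layers.rotary import RotaryEmbedding  # assumed import path

batch, seqlen, nheads, headdim = 2, 32, 4, 64
rotary = RotaryEmbedding(dim=headdim, interleaved=False, device="cuda")

qkv = torch.randn(batch, seqlen, 3, nheads, headdim, device="cuda", dtype=torch.float16)
qkv = rotary(qkv, seqlen_offset=0)  # Q and K are rotated in place; V is left untouched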
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/__init__.py DELETED
File without changes
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/activations.py DELETED
@@ -1,135 +0,0 @@
- # Copied from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/model/layers/activations.py
- import math
-
- import torch
- import torch.nn as nn
- import torch.nn.functional as F
-
- # 1/sqrt(2*pi)-> 0.3989423
- # 1/sqrt(2) -> 0.70710678
- # sqrt(2/pi) -> 0.79788456
-
- # this function is tanh approximation of gelu
- # actual gelu is:
- # x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
- @torch.jit.script
- def bias_gelu(y, bias):
-     x = bias + y
-     return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=y.dtype)
-
-
- # gradient of tanh approximation of gelu
- # gradient of actual gelu is:
- # 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
- @torch.jit.script
- def bias_gelu_back(g, y, bias):
-     """Assume that y has shape (B, D) and bias has shape (D)"""
-     x = bias + y
-     tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
-     # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
-     ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
-         1 + tanh_out
-     )
-     grad_y = ff * g
-     return grad_y.to(dtype=y.dtype), grad_y.sum(dim=(0), dtype=bias.dtype)
-
-
- class GeLUFunction(torch.autograd.Function):
-     @staticmethod
-     # bias is an optional argument
-     def forward(ctx, input, bias):
-         ctx.save_for_backward(input, bias)
-         return bias_gelu(input, bias)
-
-     @staticmethod
-     def backward(ctx, grad_output):
-         input, bias = ctx.saved_tensors
-         tmp = bias_gelu_back(grad_output, input, bias)
-         return tmp, tmp
-
-
- bias_gelu_impl = GeLUFunction.apply
-
- # this function is tanh approximation of gelu
- # actual gelu is:
- # x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
- @torch.jit.script
- def gelu_fwd(x):
-     return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=x.dtype)
-
-
- # gradient of tanh approximation of gelu
- # gradient of actual gelu is:
- # 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
- @torch.jit.script
- def gelu_bwd(g, x):
-     tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
-     # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
-     ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
-         1 + tanh_out
-     )
-     return (ff * g).to(dtype=x.dtype)
-
-
- class FastGeLUFunction(torch.autograd.Function):
-     @staticmethod
-     # bias is an optional argument
-     def forward(ctx, input):
-         ctx.save_for_backward(input)
-         return gelu_fwd(input)
-
-     @staticmethod
-     def backward(ctx, grad_output):
-         (input,) = ctx.saved_tensors
-         tmp = gelu_bwd(grad_output, input)
-         return tmp
-
-
- fast_gelu_impl = FastGeLUFunction.apply
-
-
- @torch.jit.script
- def relu_bwd(g, x):
-     return torch.where(x >= 0, g, 0.0).to(dtype=x.dtype)
-
-
- @torch.jit.script
- def sqrelu_fwd(x):
-     r = F.relu(x)
-     return (r * r).to(dtype=x.dtype)
-
-
- @torch.jit.script
- def sqrelu_bwd(g, x):
-     return (2.0 * g * F.relu(x)).to(dtype=x.dtype)
-
-
- swiglu_fwd_codestring = """
- template <typename T> T swiglu_fwd(T x, T y) {
-     return float(x) * float(y) / (1.0f + ::exp(-float(x)));
- }
- """
- swiglu_bwd_codestring = """
- template <typename T> void swiglu_bwd(T x, T y, T g, T& dx, T& dy) {
-     float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
-     dx = x_sigmoid * (1 + float(x) * (1.0f - x_sigmoid)) * float(g) * float(y);
-     dy = float(x) * x_sigmoid * float(g);
- }
- """
- swiglu_fwd = torch.cuda.jiterator._create_jit_fn(swiglu_fwd_codestring)
- swiglu_bwd = torch.cuda.jiterator._create_multi_output_jit_fn(swiglu_bwd_codestring, num_outputs=2)
-
-
- class SwiGLUFunction(torch.autograd.Function):
-
-     @staticmethod
-     def forward(ctx, x, y):
-         ctx.save_for_backward(x, y)
-         return swiglu_fwd(x, y)
-
-     @staticmethod
-     def backward(ctx, dout):
-         x, y = ctx.saved_tensors
-         return swiglu_bwd(x, y, dout)
-
- swiglu = SwiGLUFunction.apply
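
A small sanity-check sketch for the deleted activations above: gelu_fwd is the tanh approximation of GELU and the swiglu jiterator kernel computes silu(x) * y, so both can be compared against their eager PyTorch references. CUDA is required for the jiterator path; the shapes below are illustrative.

import torch
import torch.nn.functional as F
from flash_attn.ops.activations import gelu_fwd, swiglu  # module path as in the deleted file

x = torch.randn(4, 1024, device="cuda")
y = torch.randn_like(x)

# gelu_fwd implements the tanh approximation, i.e. F.gelu(..., approximate="tanh").
torch.testing.assert_close(gelu_fwd(x), F.gelu(x, approximate="tanh"), rtol=1e-4, atol=1e-4)

# swiglu (SwiGLUFunction.apply) computes x * sigmoid(x) * y, i.e. silu(x) * y.
torch.testing.assert_close(swiglu(x, y), F.silu(x) * y, rtol=1e-4, atol=1e-4)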
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/fused_dense.py DELETED
@@ -1,688 +0,0 @@
1
- # Copyright (c) 2023, Tri Dao.
2
- # Inspired by https://github.com/NVIDIA/apex/blob/master/apex/fused_dense/fused_dense.py
3
- # We make it work with pytorch amp and with bfloat16.
4
- # The TensorParallel linear modules are inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/layers.py
5
- from functools import partial
6
- from typing import Optional
7
-
8
- # import fused_dense_cuda # from apex
9
- import fused_dense_lib as fused_dense_cuda
10
- import torch
11
- import torch.nn as nn
12
- import torch.nn.functional as F
13
- from torch import Tensor
14
- from torch.distributed import ProcessGroup
15
-
16
- from flash_attn.utils.torch import custom_fwd, custom_bwd
17
- from flash_attn.ops.activations import gelu_bwd, relu_bwd, sqrelu_bwd, sqrelu_fwd
18
- from flash_attn.utils.distributed import (
19
- all_gather_raw,
20
- all_reduce,
21
- all_reduce_raw,
22
- reduce_scatter,
23
- reduce_scatter_raw,
24
- )
25
-
26
-
27
- class FusedDenseFunc(torch.autograd.Function):
28
- @staticmethod
29
- @custom_fwd
30
- def forward(
31
- ctx, x, weight, bias, return_residual=False, process_group=None, sequence_parallel=True
32
- ):
33
- """
34
- If process_group is not None and sequence_parallel=True, we're doing Tensor Parallel
35
- with sequence parallelism: we do an all_gather_raw of x before doing the matmul.
36
- """
37
- ctx.compute_weight_gradient = weight.requires_grad
38
- ctx.return_residual = return_residual
39
- ctx.process_group = process_group
40
- ctx.sequence_parallel = sequence_parallel
41
-
42
- if torch.is_autocast_enabled():
43
- x = x.to(dtype=torch.get_autocast_gpu_dtype())
44
- x = x.contiguous()
45
- if process_group is not None and sequence_parallel:
46
- # We want to kick off the all_gather early, before weight dtype conversion
47
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
48
- else:
49
- total_x = x
50
-
51
- if torch.is_autocast_enabled():
52
- weight = weight.to(dtype=torch.get_autocast_gpu_dtype())
53
- bias = bias.to(dtype=torch.get_autocast_gpu_dtype()) if bias is not None else None
54
- weight = weight.contiguous()
55
- if process_group is not None and sequence_parallel:
56
- handle_x.wait()
57
- batch_shape, n = total_x.shape[:-1], total_x.shape[-1]
58
- batch_dim = batch_shape.numel()
59
- # https://github.com/pytorch/pytorch/blob/5b51849b48a7dbccd297286cc0110def4706f9e7/aten/src/ATen/native/cuda/Blas.cpp#L174
60
- if min(batch_dim, n, *weight.shape) > 65535 * 32:
61
- raise RuntimeError("fused_dense only supports matrix dims <= 2M")
62
- output = F.linear(total_x, weight, bias)
63
- if ctx.compute_weight_gradient:
64
- ctx.save_for_backward(x, weight)
65
- else:
66
- ctx.save_for_backward(weight)
67
- return output if not return_residual else (output, x)
68
-
69
- @staticmethod
70
- @custom_bwd
71
- def backward(ctx, grad_output, *args):
72
- grad_output = grad_output.contiguous()
73
- if ctx.return_residual:
74
- (grad_input,) = args
75
- grad_input = grad_input.contiguous()
76
- process_group = ctx.process_group
77
- sequence_parallel = ctx.sequence_parallel
78
- if ctx.compute_weight_gradient:
79
- x, weight = ctx.saved_tensors
80
- if process_group is not None and sequence_parallel:
81
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
82
- else:
83
- total_x = x
84
- else:
85
- (weight,) = ctx.saved_tensors
86
- total_x = None
87
- batch_shape = grad_output.shape[:-1]
88
- batch_dim = batch_shape.numel()
89
- grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
90
- if ctx.needs_input_grad[0]:
91
- if not ctx.return_residual:
92
- grad_input = F.linear(grad_output, weight.t())
93
- else:
94
- grad_input = torch.addmm(
95
- grad_input.reshape(batch_dim, grad_input.shape[-1]), grad_output, weight
96
- )
97
- grad_input = grad_input.reshape(*batch_shape, grad_input.shape[-1])
98
- if process_group is not None:
99
- reduce_fn = reduce_scatter_raw if sequence_parallel else all_reduce_raw
100
- grad_input, handle_grad_input = reduce_fn(grad_input, process_group, async_op=True)
101
- else:
102
- grad_input = None
103
- if ctx.needs_input_grad[1]:
104
- assert ctx.compute_weight_gradient
105
- if process_group is not None and sequence_parallel:
106
- handle_x.wait()
107
- grad_weight, grad_bias = fused_dense_cuda.linear_bias_wgrad(
108
- total_x.reshape(batch_dim, total_x.shape[-1]), grad_output, ctx.needs_input_grad[2]
109
- )
110
- else:
111
- grad_weight = None
112
- grad_bias = grad_output if ctx.needs_input_grad[2] else None
113
- if process_group is not None and ctx.needs_input_grad[0]:
114
- handle_grad_input.wait()
115
- return grad_input, grad_weight, grad_bias, None, None, None
116
-
117
-
118
- def fused_dense_func(
119
- x: Tensor,
120
- weight: Tensor,
121
- bias: Optional[Tensor] = None,
122
- return_residual: bool = False,
123
- process_group: Optional[ProcessGroup] = None,
124
- sequence_parallel: bool = True,
125
- ):
126
- dtype_eligible = x.dtype in [torch.float16, torch.bfloat16] or (
127
- x.dtype == torch.float32 and torch.is_autocast_enabled()
128
- )
129
- if x.is_cuda and weight.is_cuda and (bias is None or bias.is_cuda) and dtype_eligible:
130
- return FusedDenseFunc.apply(
131
- x, weight, bias, return_residual, process_group, sequence_parallel
132
- )
133
- else:
134
- assert process_group is None
135
- out = F.linear(x, weight, bias)
136
- return out if not return_residual else (out, x)
137
-
138
-
139
- class FusedDense(nn.Linear):
140
- def __init__(
141
- self,
142
- in_features: int,
143
- out_features: int,
144
- bias: bool = True,
145
- return_residual: bool = False,
146
- device=None,
147
- dtype=None,
148
- ) -> None:
149
- super().__init__(in_features, out_features, bias=bias, device=device, dtype=dtype)
150
- self.return_residual = return_residual
151
-
152
- def forward(self, x, process_group=None):
153
- """
154
- If process_group is not None, we're doing Tensor Parallel with sequence parallelism:
155
- we do an all_gather of x before doing the matmul.
156
- """
157
- return fused_dense_func(
158
- x,
159
- self.weight,
160
- self.bias,
161
- return_residual=self.return_residual,
162
- process_group=process_group,
163
- )
164
-
165
-
166
- class ColumnParallelLinear(nn.Linear):
167
- def __init__(
168
- self,
169
- in_features: int,
170
- out_features: int,
171
- process_group: ProcessGroup,
172
- bias: bool = True,
173
- sequence_parallel=True,
174
- multiple_of=1,
175
- device=None,
176
- dtype=None,
177
- ) -> None:
178
- world_size = torch.distributed.get_world_size(process_group)
179
- if out_features % multiple_of:
180
- raise ValueError(f"out_features ({out_features}) must be a multiple of {multiple_of}")
181
- multiple = out_features // multiple_of
182
- # We want to split @multiple across world_size, but it could be an uneven split
183
- div = multiple // world_size
184
- mod = multiple % world_size
185
- # The first @mod ranks get @div + 1 copies, the rest get @div copies
186
- local_multiple = div + int(torch.distributed.get_rank(process_group) < mod)
187
- super().__init__(
188
- in_features, local_multiple * multiple_of, bias=bias, device=device, dtype=dtype
189
- )
190
- self.process_group = process_group
191
- self.sequence_parallel = sequence_parallel
192
-
193
- def forward(self, x):
194
- # If self.sequence_parallel is True, we're doing Tensor Parallel with sequence parallelism:
195
- # we do an all_gather of x before doing the matmul.
196
- # If not, then the input is already gathered.
197
- return fused_dense_func(
198
- x,
199
- self.weight,
200
- self.bias,
201
- process_group=self.process_group,
202
- sequence_parallel=self.sequence_parallel,
203
- )
204
-
205
-
206
- class RowParallelLinear(nn.Linear):
207
- def __init__(
208
- self,
209
- in_features: int,
210
- out_features: int,
211
- process_group: ProcessGroup,
212
- bias: bool = True,
213
- sequence_parallel=True,
214
- multiple_of=1,
215
- device=None,
216
- dtype=None,
217
- ) -> None:
218
- world_size = torch.distributed.get_world_size(process_group)
219
- rank = torch.distributed.get_rank(process_group)
220
- if in_features % multiple_of:
221
- raise ValueError(f"in_features ({in_features}) must be a multiple of {multiple_of}")
222
- multiple = in_features // multiple_of
223
- # We want to split @multiple across world_size, but it could be an uneven split
224
- div = multiple // world_size
225
- mod = multiple % world_size
226
- # The first @mod ranks get @div + 1 copies, the rest get @div copies
227
- local_multiple = div + int(torch.distributed.get_rank(process_group) < mod)
228
- # Only rank 0 will have bias
229
- super().__init__(
230
- local_multiple * multiple_of,
231
- out_features,
232
- bias=bias and rank == 0,
233
- device=device,
234
- dtype=dtype,
235
- )
236
- self.process_group = process_group
237
- self.sequence_parallel = sequence_parallel
238
-
239
- def forward(self, x):
240
- """
241
- We're doing Tensor Parallel with sequence parallelism: we do the matmul and then
242
- a reduce_scatter of the result.
243
- """
244
- out = fused_dense_func(x, self.weight, self.bias)
245
- reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce
246
- return reduce_fn(out, self.process_group)
247
-
248
-
249
- class FusedMLPFunc(torch.autograd.Function):
250
- @staticmethod
251
- @custom_fwd
252
- def forward(
253
- ctx,
254
- x,
255
- weight1,
256
- bias1,
257
- weight2,
258
- bias2,
259
- activation="gelu_approx",
260
- save_pre_act=True,
261
- return_residual=False,
262
- checkpoint_lvl=0,
263
- heuristic=0,
264
- process_group=None,
265
- sequence_parallel=True,
266
- ):
267
- """
268
- If process_group is not None and sequence_parallel=True, we're doing Tensor Parallel
269
- with sequence parallelism: we do an all_gather of x before doing the matmul.
270
- If sequence_parallel=False, then the input is already gathered.
271
-
272
- checkpoint_lvl:
273
- 0: no recomputation in the bwd
274
- 1: recompute gelu_out / relu_out in the bwd
275
- 2: recompute pre_act and gelu_out / relu_out in the bwd
276
- """
277
- assert -1 <= heuristic <= 4
278
- assert activation in ["gelu_approx", "relu", "sqrelu"]
279
- if activation == "sqrelu":
280
- assert heuristic == -1
281
- if not save_pre_act:
282
- checkpoint_lvl = 2
283
- assert checkpoint_lvl in [0, 1, 2]
284
- ctx.return_residual = return_residual
285
- ctx.process_group = process_group
286
- ctx.sequence_parallel = sequence_parallel
287
- ctx.checkpoint_lvl = checkpoint_lvl
288
- ctx.activation = activation
289
- ctx.heuristic = heuristic
290
-
291
- if torch.is_autocast_enabled():
292
- x = x.to(dtype=torch.get_autocast_gpu_dtype())
293
- x = x.contiguous()
294
- if process_group is not None and sequence_parallel:
295
- # We want to kick off the all_gather early, before weight dtype conversion
296
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
297
- else:
298
- total_x = x
299
-
300
- if torch.is_autocast_enabled():
301
- dtype = torch.get_autocast_gpu_dtype()
302
- weight1, weight2 = [a.to(dtype=dtype) for a in [weight1, weight2]]
303
- bias1 = bias1.to(dtype=dtype) if bias1 is not None else None
304
- bias2 = bias2.to(dtype=dtype) if bias2 is not None else None
305
- weight1 = weight1.contiguous()
306
- bias1 = bias1.contiguous() if bias1 is not None else None
307
- weight2 = weight2.contiguous()
308
- bias2 = bias2.contiguous() if bias2 is not None else None
309
- if process_group is not None and sequence_parallel:
310
- handle_x.wait()
311
- batch_shape, n = total_x.shape[:-1], total_x.shape[-1]
312
- batch_dim = batch_shape.numel()
313
- # https://github.com/pytorch/pytorch/blob/5b51849b48a7dbccd297286cc0110def4706f9e7/aten/src/ATen/native/cuda/Blas.cpp#L174
314
- if min(batch_dim, n, *weight1.shape, *weight2.shape) > 65535 * 32:
315
- raise RuntimeError("fused_dense only supports matrix dims <= 2M")
316
- if heuristic == -1:
317
- pre_act = F.linear(total_x, weight1, bias1)
318
- activation_fn = (
319
- partial(F.gelu, approximate="tanh")
320
- if activation == "gelu_approx"
321
- else (sqrelu_fwd if activation == "sqrelu" else F.relu)
322
- )
323
- with torch.jit.fuser("fuser2"):
324
- output1 = activation_fn(pre_act)
325
- # This is before adding bias1
326
- # pre_act = F.linear(total_x.reshape(batch_dim, n), weight1)
327
- # with torch.jit.fuser('fuser2'):
328
- # output1 = bias_gelu(pre_act, bias1)
329
- else:
330
- is_gelu = activation == "gelu_approx"
331
- output1, *rest = fused_dense_cuda.linear_act_forward(
332
- total_x.reshape(batch_dim, n), weight1, bias1, is_gelu, save_pre_act, heuristic
333
- )
334
- if save_pre_act:
335
- pre_act = rest[0]
336
- output2 = F.linear(output1, weight2, bias2)
337
- if checkpoint_lvl == 0 or (checkpoint_lvl == 1 and activation == "relu"):
338
- # For RELU the pre_act is very small (just a bit-mask) so we just save it
339
- ctx.save_for_backward(x, weight1, weight2, pre_act, output1)
340
- elif checkpoint_lvl == 1:
341
- ctx.save_for_backward(x, weight1, weight2, pre_act)
342
- elif checkpoint_lvl == 2:
343
- ctx.save_for_backward(x, weight1, weight2, bias1)
344
- output2 = output2.reshape(*batch_shape, output2.shape[-1])
345
- return output2 if not return_residual else (output2, x)
346
-
347
- @staticmethod
348
- @custom_bwd
349
- def backward(ctx, grad_output, *args):
350
- grad_output = grad_output.contiguous()
351
- checkpoint_lvl = ctx.checkpoint_lvl
352
- activation = ctx.activation
353
- activation_fn = (
354
- partial(F.gelu, approximate="tanh")
355
- if activation == "gelu_approx"
356
- else (sqrelu_fwd if activation == "sqrelu" else F.relu)
357
- )
358
- if ctx.return_residual:
359
- (grad_input,) = args
360
- grad_input = grad_input.contiguous()
361
- process_group = ctx.process_group
362
- sequence_parallel = ctx.sequence_parallel
363
- x, weight1, weight2, *rest = ctx.saved_tensors
364
- if process_group is None or not sequence_parallel:
365
- total_x = x
366
- batch_shape = grad_output.shape[:-1]
367
- batch_dim = batch_shape.numel()
368
- if checkpoint_lvl in [0, 1]:
369
- if process_group is not None and sequence_parallel:
370
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
371
- if checkpoint_lvl == 0 or (checkpoint_lvl == 1 and activation == "relu"):
372
- pre_act, output1 = rest
373
- elif checkpoint_lvl == 1:
374
- (pre_act,) = rest
375
- with torch.jit.fuser("fuser2"):
376
- output1 = activation_fn(pre_act)
377
- elif checkpoint_lvl == 2:
378
- (bias1,) = rest
379
- if process_group is not None and sequence_parallel:
380
- total_x, _ = all_gather_raw(x, process_group)
381
- if ctx.heuristic == -1:
382
- pre_act = F.linear(total_x, weight1, bias1)
383
- with torch.jit.fuser("fuser2"):
384
- output1 = activation_fn(pre_act)
385
- else:
386
- output1, pre_act = fused_dense_cuda.linear_act_forward(
387
- total_x.reshape(batch_dim, total_x.shape[-1]),
388
- weight1,
389
- bias1,
390
- activation == "gelu_approx",
391
- True,
392
- ctx.heuristic,
393
- )
394
-
395
- grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
396
- output1 = output1.reshape(batch_dim, output1.shape[-1])
397
- pre_act = pre_act.reshape(batch_dim, pre_act.shape[-1])
398
- if ctx.needs_input_grad[3]:
399
- grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(
400
- output1, grad_output, ctx.needs_input_grad[4]
401
- )
402
- else:
403
- grad_weight2 = None
404
- grad_bias2 = grad_output if ctx.needs_input_grad[4] else None
405
- if ctx.heuristic == -1:
406
- # grad_pre_act = matmul_dgelu(grad_output, weight2, pre_act)
407
- grad_output1 = F.linear(grad_output, weight2.t())
408
- activation_grad_fn = (
409
- gelu_bwd
410
- if activation == "gelu_approx"
411
- else (sqrelu_bwd if activation == "sqrelu" else relu_bwd)
412
- )
413
- with torch.jit.fuser("fuser2"):
414
- grad_pre_act = activation_grad_fn(grad_output1, pre_act)
415
- else:
416
- # The cublasLt epilogue has to compute both gelu/relu grad and bias grad, we can't
417
- # just compute gelu/relu grad
418
- grad_pre_act, grad_bias1 = fused_dense_cuda.bias_act_linear_dgrad_bgrad(
419
- weight2, grad_output, pre_act, activation == "gelu_approx", ctx.heuristic
420
- )
421
- if not ctx.needs_input_grad[2]:
422
- grad_bias1 = None
423
- if ctx.needs_input_grad[0]:
424
- if not ctx.return_residual:
425
- grad_input = F.linear(grad_pre_act, weight1.t())
426
- else:
427
- grad_input = torch.addmm(
428
- grad_input.reshape(batch_dim, grad_input.shape[-1]), grad_pre_act, weight1
429
- )
430
- grad_input = grad_input.reshape(*batch_shape, grad_input.shape[-1])
431
- if process_group is not None:
432
- reduce_fn = reduce_scatter_raw if sequence_parallel else all_reduce_raw
433
- grad_input, handle_grad_input = reduce_fn(grad_input, process_group, async_op=True)
434
- else:
435
- grad_input = None
436
- if ctx.heuristic == -1:
437
- if ctx.needs_input_grad[1]:
438
- if process_group is not None and sequence_parallel and checkpoint_lvl != 2:
439
- handle_x.wait()
440
- grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_wgrad(
441
- total_x.reshape(batch_dim, total_x.shape[-1]),
442
- grad_pre_act,
443
- ctx.needs_input_grad[2],
444
- )
445
- else:
446
- grad_weight1 = None
447
- grad_bias1 = grad_pre_act if ctx.needs_input_grad[2] else None
448
- else:
449
- if ctx.needs_input_grad[1]:
450
- if process_group is not None and sequence_parallel and checkpoint_lvl != 2:
451
- handle_x.wait()
452
- grad_weight1 = F.linear(
453
- grad_pre_act.t(), total_x.reshape(batch_dim, total_x.shape[-1]).t()
454
- )
455
- else:
456
- grad_weight1 = None
457
- if process_group is not None and ctx.needs_input_grad[0]:
458
- handle_grad_input.wait()
459
- return (
460
- grad_input,
461
- grad_weight1,
462
- grad_bias1,
463
- grad_weight2,
464
- grad_bias2,
465
- None,
466
- None,
467
- None,
468
- None,
469
- None,
470
- None,
471
- None,
472
- )
473
-
474
-
475
- def fused_mlp_func(
476
- x: Tensor,
477
- weight1: Tensor,
478
- weight2: Tensor,
479
- bias1: Optional[Tensor] = None,
480
- bias2: Optional[Tensor] = None,
481
- activation: str = "gelu_approx",
482
- save_pre_act: bool = True,
483
- return_residual: bool = False,
484
- checkpoint_lvl: int = 0,
485
- heuristic: int = 0,
486
- process_group: Optional[ProcessGroup] = None,
487
- sequence_parallel: bool = True,
488
- ):
489
- assert activation in ["gelu_approx", "relu", "sqrelu"]
490
- dtype_eligible = x.dtype in [torch.float16, torch.bfloat16] or (
491
- x.dtype == torch.float32 and torch.is_autocast_enabled()
492
- )
493
- # If we save pre-activation, dimension must be divisible by 128 (relu) or 8 (gelu)
494
- dim_eligible = not save_pre_act or (x.shape[-1] % (128 if activation == "relu" else 8) == 0)
495
- if (
496
- x.is_cuda
497
- and weight1.is_cuda
498
- and weight2.is_cuda
499
- and (bias1 is None or bias1.is_cuda)
500
- and (bias2 is None or bias2.is_cuda)
501
- and dtype_eligible
502
- and dim_eligible
503
- ):
504
- return FusedMLPFunc.apply(
505
- x,
506
- weight1,
507
- bias1,
508
- weight2,
509
- bias2,
510
- activation,
511
- save_pre_act,
512
- return_residual,
513
- checkpoint_lvl,
514
- heuristic,
515
- process_group,
516
- sequence_parallel,
517
- )
518
- else:
519
- assert process_group is None
520
- pre_act = F.linear(x, weight1, bias1)
521
- activation_fn = (
522
- partial(F.gelu, approximate="tanh")
523
- if activation == "gelu_approx"
524
- else partial(F.relu, inplace=True)
525
- )
526
- output1 = activation_fn(pre_act)
527
- output2 = F.linear(output1, weight2, bias2)
528
- return output2 if not return_residual else (output2, x)
529
-
530
-
531
- class FusedMLP(nn.Module):
532
- def __init__(
533
- self,
534
- in_features,
535
- hidden_features=None,
536
- out_features=None,
537
- bias1=True,
538
- bias2=True,
539
- activation="gelu_approx",
540
- return_residual=False,
541
- checkpoint_lvl=0,
542
- heuristic="auto",
543
- device=None,
544
- dtype=None,
545
- ):
546
- """
547
- If process_group is not None, we're doing Tensor Parallel with sequence parallelism:
548
- we do an all_gather of x before doing the matmul, gelu, then matmul.
549
- Finally we do a reduce_scatter of the output.
550
-
551
- checkpoint_lvl (increasing lvl means slower but more memory saving):
552
- 0: no recomputation in the bwd
553
- 1: recompute gelu_out in the bwd
554
- 2: recompute pre_act and gelu_out in the bwd
555
- heuristic:
556
- -1: don't fuse gemm + gelu (separate kernel)
557
- 0..4: use this heuristic for the algo section in the fused gemm + gelu
558
- 'auto': heuristic will be picked automatically:
559
- For CUDA >= 11.8, we set heuristic=0 for both fp16 and bf16 for best perf.
560
- For CUDA <= 11.7, we set heuristic=1 for fp16 and heuristic=-1 for bf16.
561
- For H100, we set heuristic=-1 for both fp16 and bf16 as the fused cuBlasLt implementation
562
- is slower than the unfused version.
563
- return_residual: whether to return the input x along with the output. This is for
564
- performance reason: for post-norm architecture, returning the input allows us
565
- to fuse the backward of nn.Linear with the residual connection.
566
- """
567
- assert checkpoint_lvl in [0, 1, 2]
568
- assert activation in ["gelu_approx", "relu", "sqrelu"]
569
- factory_kwargs = {"device": device, "dtype": dtype}
570
- super().__init__()
571
- out_features = out_features or in_features
572
- hidden_features = hidden_features or in_features * 4
573
- self.activation = activation
574
- self.return_residual = return_residual
575
- self.checkpoint_lvl = checkpoint_lvl
576
- self.heuristic = heuristic if activation != "sqrelu" else -1
577
- self.fc1 = nn.Linear(in_features, hidden_features, bias=bias1, **factory_kwargs)
578
- self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
579
-
580
- def forward(self, x, process_group=None):
581
- dtype = x.dtype if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype()
582
- if self.heuristic == "auto":
583
- if self.activation == "gelu_approx":
584
- if torch.cuda.get_device_capability("cuda") == (9, 0):
585
- heuristic = -1
586
- else:
587
- cuda_ver = tuple(map(int, torch.version.cuda.split(".")))
588
- heuristic = 0 if cuda_ver >= (11, 8) else (1 if dtype == torch.float16 else -1)
589
- else:
590
- heuristic = 0
591
- else:
592
- heuristic = self.heuristic
593
- out = fused_mlp_func(
594
- x,
595
- self.fc1.weight,
596
- self.fc2.weight,
597
- self.fc1.bias,
598
- self.fc2.bias,
599
- activation=self.activation,
600
- save_pre_act=self.training,
601
- return_residual=self.return_residual,
602
- checkpoint_lvl=self.checkpoint_lvl,
603
- heuristic=heuristic,
604
- process_group=process_group,
605
- )
606
- if self.return_residual:
607
- out, x = out
608
- if process_group is not None:
609
- out = reduce_scatter(out, process_group)
610
- return out if not self.return_residual else (out, x)
611
-
612
-
613
- class ParallelFusedMLP(nn.Module):
614
- def __init__(
615
- self,
616
- in_features,
617
- hidden_features=None,
618
- out_features=None,
619
- activation="gelu_approx",
620
- process_group: ProcessGroup = None,
621
- bias1=True,
622
- bias2=True,
623
- sequence_parallel=True,
624
- checkpoint_lvl=0,
625
- heuristic="auto",
626
- device=None,
627
- dtype=None,
628
- ):
629
- """
630
- process_group is required. We're doing Tensor Parallel with sequence parallelism:
631
- we do an all_gather of x before doing the matmul, gelu, then matmul.
632
- Finally we do a reduce_scatter of the output.
633
-
634
- checkpoint_lvl (increasing lvl means slower but more memory saving):
635
- 0: no recomputation in the bwd
636
- 1: recompute gelu_out in the bwd
637
- 2: recompute pre_act and gelu_out in the bwd
638
- heuristic:
639
- -1: don't fuse gemm + gelu (separate kernel)
640
- 0..4: use this heuristic for the algo section in the fused gemm + gelu
641
- 'auto': heuristic will be picked automatically:
642
- For CUDA >= 11.8, we set heuristic=0 for both fp16 and bf16 for best perf.
643
- For CUDA <= 11.7, we set heuristic=1 for fp16 and heuristic=-1 for bf16.
644
- """
645
- assert checkpoint_lvl in [0, 1, 2]
646
- assert activation in ["gelu_approx", "relu", "sqrelu"]
647
- assert process_group is not None
648
- factory_kwargs = {"device": device, "dtype": dtype}
649
- super().__init__()
650
- out_features = out_features or in_features
651
- hidden_features = hidden_features or in_features * 4
652
- self.activation = activation
653
- self.process_group = process_group
654
- self.sequence_parallel = sequence_parallel
655
- self.checkpoint_lvl = checkpoint_lvl
656
- self.heuristic = heuristic if activation != "sqrelu" else -1
657
- self.fc1 = ColumnParallelLinear(
658
- in_features, hidden_features, process_group, bias=bias1, **factory_kwargs
659
- )
660
- self.fc2 = RowParallelLinear(
661
- hidden_features, out_features, process_group, bias=bias2, **factory_kwargs
662
- )
663
-
664
- def forward(self, x):
665
- dtype = x.dtype if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype()
666
- if self.heuristic == "auto":
667
- if self.activation == "gelu_approx":
668
- cuda_ver = tuple(map(int, torch.version.cuda.split(".")))
669
- heuristic = 0 if cuda_ver >= (11, 8) else (1 if dtype == torch.float16 else -1)
670
- else:
671
- heuristic = 0
672
- else:
673
- heuristic = self.heuristic
674
- out = fused_mlp_func(
675
- x,
676
- self.fc1.weight,
677
- self.fc2.weight,
678
- self.fc1.bias,
679
- self.fc2.bias,
680
- activation=self.activation,
681
- save_pre_act=self.training,
682
- checkpoint_lvl=self.checkpoint_lvl,
683
- heuristic=heuristic,
684
- process_group=self.process_group,
685
- sequence_parallel=self.sequence_parallel,
686
- )
687
- reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce
688
- return reduce_fn(out, self.process_group)
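
As a usage note for the deleted fused_dense.py above: FusedMLP runs linear, tanh-GELU, linear through the cuBLASLt-fused path when the fused_dense_lib extension is available and the dtype/device checks in fused_mlp_func pass, otherwise the free function falls back to plain F.linear. Below is a minimal single-GPU sketch, with illustrative sizes and assuming the extension is built.

import torch
from flash_attn.ops.fused_dense import FusedMLP  # module path as in the deleted file

# Illustrative sizes; hidden_features defaults to 4 * in_features when omitted.
mlp = FusedMLP(1024, 4096, activation="gelu_approx", checkpoint_lvl=0).to("cuda", torch.float16)

x = torch.randn(8, 512, 1024, device="cuda", dtype=torch.float16, requires_grad=True)
out = mlp(x)          # fused linear + gelu_approx + linear
out.sum().backward()  # recomputation in the backward is controlled by checkpoint_lvl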
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/layer_norm.py DELETED
@@ -1,800 +0,0 @@
1
- # Copyright (c) 2022, Tri Dao.
2
- # Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py
3
-
4
- import dropout_layer_norm
5
- import torch
6
- from torch.nn import init
7
-
8
-
9
- def maybe_align(x, alignment_in_bytes=16):
10
- """Assume that x already has last dim divisible by alignment_in_bytes"""
11
- # TD [2023-07-04] I'm not 100% sure that clone will align the memory
12
- # https://discuss.pytorch.org/t/how-to-ensure-that-tensor-data-ptr-is-aligned-to-16-bytes/183440
13
- return x if x.data_ptr() % alignment_in_bytes == 0 else x.clone()
14
-
15
-
16
- def _dropout_add_layer_norm_forward(
17
- x0,
18
- residual,
19
- gamma,
20
- beta,
21
- rowscale,
22
- colscale,
23
- dropout_p,
24
- epsilon,
25
- residual_in_fp32=False,
26
- is_rms_norm=False,
27
- ):
28
- """Assume that arguments are contiguous and aligned to 16 bytes"""
29
- hidden_size = gamma.numel()
30
- x0mat = x0.view((-1, hidden_size))
31
- residualmat = residual.view((-1, hidden_size)) if residual is not None else None
32
- rowscale = rowscale.view(-1) if rowscale is not None else None
33
- zmat, xmat, dmask, mu, rsigma = dropout_layer_norm.dropout_add_ln_fwd(
34
- x0mat,
35
- residualmat,
36
- gamma,
37
- beta,
38
- rowscale,
39
- colscale,
40
- None,
41
- None,
42
- dropout_p,
43
- epsilon,
44
- 1.0,
45
- 0,
46
- None,
47
- residual_in_fp32,
48
- is_rms_norm,
49
- )
50
- # dmask is None if dropout_p == 0.0
51
- # xmat is None if dropout_p == 0.0 and residual is None and residual_dtype != input_dtype
52
- return zmat, xmat if xmat is not None else x0mat, dmask, mu, rsigma
53
-
54
-
55
- def _dropout_add_layer_norm_backward(
56
- dz,
57
- dx,
58
- x,
59
- x0,
60
- dmask,
61
- mu,
62
- rsigma,
63
- gamma,
64
- rowscale,
65
- colscale,
66
- dropout_p,
67
- has_residual,
68
- is_rms_norm=False,
69
- ):
70
- """Assume that arguments are contiguous and aligned to 16 bytes
71
- dx == None means that it was a post-norm architecture
72
- (x = drop(x0) + residual was not returned in the fwd).
73
- x0 must not be None if we have colscale.
74
- """
75
- hidden_size = gamma.numel()
76
- xmat = x.view((-1, hidden_size))
77
- dzmat = dz.view(xmat.shape)
78
- dxmat = dx.view(xmat.shape) if dx is not None else None
79
- x0mat = x0.view((-1, hidden_size)) if x0 is not None else None
80
- rowscale = rowscale.view(-1) if rowscale is not None else None
81
- if colscale is not None:
82
- assert x0 is not None, "x0 is required to compute the gradient of colscale"
83
- dx0mat, dresidualmat, dgamma, dbeta, _, _, *rest = dropout_layer_norm.dropout_add_ln_bwd(
84
- dzmat,
85
- dxmat,
86
- xmat,
87
- x0mat,
88
- dmask,
89
- mu,
90
- rsigma,
91
- gamma,
92
- rowscale,
93
- colscale,
94
- None,
95
- None,
96
- dropout_p,
97
- 1.0,
98
- 0,
99
- has_residual,
100
- is_rms_norm,
101
- )
102
- # dresidualmat is None if not has_residual
103
- if colscale is None:
104
- return dx0mat, dresidualmat, dgamma, dbeta
105
- else:
106
- dcolscale = rest[0]
107
- return dx0mat, dresidualmat, dgamma, dbeta, dcolscale
108
-
109
-
110
- def _dropout_add_layer_norm_subset_forward(
111
- x0,
112
- residual,
113
- gamma,
114
- beta,
115
- colscale,
116
- x0_subset,
117
- out_subset,
118
- dropout_p,
119
- epsilon,
120
- rowscale_const,
121
- out_numrows,
122
- residual_in_fp32=False,
123
- is_rms_norm=False,
124
- ):
125
- """Assume that arguments are contiguous and aligned to 16 bytes"""
126
- hidden_size = gamma.numel()
127
- x0mat = x0.view((-1, hidden_size))
128
- residualmat = residual.view((-1, hidden_size)) if residual is not None else None
129
- x0_subset = x0_subset.view(-1) if x0_subset is not None else None
130
- out_subset = out_subset.view(-1) if out_subset is not None else None
131
- zmat, xmat, dmask, mu, rsigma = dropout_layer_norm.dropout_add_ln_fwd(
132
- x0mat,
133
- residualmat,
134
- gamma,
135
- beta,
136
- None,
137
- colscale,
138
- x0_subset,
139
- out_subset,
140
- dropout_p,
141
- epsilon,
142
- rowscale_const,
143
- out_numrows,
144
- None,
145
- residual_in_fp32,
146
- is_rms_norm,
147
- )
148
- # dmask is None if dropout_p == 0.0
149
- # xmat is None if dropout_p == 0.0 and residual is None and residual_dtype != input_dtype
150
- return zmat, xmat if xmat is not None else x0mat, dmask, mu, rsigma
151
-
152
-
153
- def _dropout_add_layer_norm_subset_backward(
154
- dz,
155
- dx,
156
- x,
157
- x0,
158
- dmask,
159
- mu,
160
- rsigma,
161
- gamma,
162
- colscale,
163
- x0_subset,
164
- out_subset,
165
- dropout_p,
166
- rowscale_const,
167
- x0_numrows,
168
- has_residual,
169
- is_rms_norm=False,
170
- ):
171
- """Assume that arguments are contiguous and aligned to 16 bytes
172
- dx == None means that it was a post-norm architecture
173
- (x = drop(x0) + residual was not returned in the fwd).
174
- x0 must not be None if we have colscale.
175
- """
176
- hidden_size = gamma.numel()
177
- xmat = x.view((-1, hidden_size))
178
- dzmat = dz.view(-1, hidden_size)
179
- dxmat = dx.view(xmat.shape) if dx is not None else None
180
- x0mat = x0.view((-1, hidden_size)) if x0 is not None else None
181
- x0_subset = x0_subset.view(-1) if x0_subset is not None else None
182
- out_subset = out_subset.view(-1) if out_subset is not None else None
183
- if colscale is not None:
184
- assert x0 is not None, "x0 is required to compute the gradient of colscale"
185
- dx0mat, dresidualmat, dgamma, dbeta, _, _, *rest = dropout_layer_norm.dropout_add_ln_bwd(
186
- dzmat,
187
- dxmat,
188
- xmat,
189
- x0mat,
190
- dmask,
191
- mu,
192
- rsigma,
193
- gamma,
194
- None,
195
- colscale,
196
- x0_subset,
197
- out_subset,
198
- dropout_p,
199
- rowscale_const,
200
- x0_numrows,
201
- has_residual,
202
- is_rms_norm,
203
- )
204
- # dresidualmat is None if not has_residual
205
- if colscale is None:
206
- return dx0mat, dresidualmat, dgamma, dbeta
207
- else:
208
- dcolscale = rest[0]
209
- return dx0mat, dresidualmat, dgamma, dbeta, dcolscale
210
-
211
-
212
- def _dropout_add_layer_norm_parallel_residual_forward(
213
- x0,
214
- x1,
215
- residual,
216
- gamma0,
217
- beta0,
218
- gamma1,
219
- beta1,
220
- dropout_p,
221
- epsilon,
222
- residual_in_fp32=False,
223
- is_rms_norm=False,
224
- ):
225
- """Assume that arguments are contiguous and aligned to 16 bytes"""
226
- hidden_size = gamma0.numel()
227
- x0mat = x0.view((-1, hidden_size))
228
- x1mat = x1.view((-1, hidden_size)) if x1 is not None else None
229
- residualmat = residual.view((-1, hidden_size)) if residual is not None else None
230
- (
231
- z0mat,
232
- z1mat,
233
- xmat,
234
- dmask0,
235
- dmask1,
236
- mu,
237
- rsigma,
238
- ) = dropout_layer_norm.dropout_add_ln_parallel_residual_fwd(
239
- x0mat,
240
- x1mat,
241
- residualmat,
242
- gamma0,
243
- beta0,
244
- gamma1,
245
- beta1,
246
- dropout_p,
247
- epsilon,
248
- None,
249
- residual_in_fp32,
250
- is_rms_norm,
251
- )
252
- # dmask0 and dmask1 are None if dropout_p == 0.0
253
- # xmat is None if dropout_p == 0.0 and residual is None and residual_dtype != input_dtype
254
- return z0mat, z1mat, xmat if xmat is not None else x0mat, dmask0, dmask1, mu, rsigma
255
-
256
-
257
- def _dropout_add_layer_norm_parallel_residual_backward(
258
- dz0,
259
- dz1,
260
- dx,
261
- x,
262
- dmask0,
263
- dmask1,
264
- mu,
265
- rsigma,
266
- gamma0,
267
- gamma1,
268
- dropout_p,
269
- has_x1,
270
- has_residual,
271
- is_rms_norm=False,
272
- ):
273
- """Assume that arguments are contiguous and aligned to 16 bytes
274
- dx == None means that it was a post-norm architecture
275
- (x = drop(x0) + residual was not returned in the fwd).
276
- """
277
- hidden_size = gamma0.numel()
278
- xmat = x.view((-1, hidden_size))
279
- dz0mat = dz0.view(xmat.shape)
280
- dz1mat = dz1.view(xmat.shape) if dz1 is not None else None
281
- dxmat = dx.view(xmat.shape) if dx is not None else None
282
- (
283
- dx0mat,
284
- dx1mat,
285
- dresidualmat,
286
- dgamma0,
287
- dbeta0,
288
- dgamma1,
289
- dbeta1,
290
- *rest,
291
- ) = dropout_layer_norm.dropout_add_ln_parallel_residual_bwd(
292
- dz0mat,
293
- dz1mat,
294
- dxmat,
295
- xmat,
296
- dmask0,
297
- dmask1,
298
- mu,
299
- rsigma,
300
- gamma0,
301
- gamma1,
302
- dropout_p,
303
- has_x1,
304
- has_residual,
305
- is_rms_norm,
306
- )
307
- # dresidualmat is None if not has_residual
308
- return dx0mat, dx1mat, dresidualmat, dgamma0, dbeta0, dgamma1, dbeta1
309
-
310
-
311
- class DropoutAddLayerNormFn(torch.autograd.Function):
312
- @staticmethod
313
- def forward(
314
- ctx,
315
- x0,
316
- residual,
317
- gamma,
318
- beta,
319
- rowscale,
320
- colscale,
321
- dropout_p,
322
- epsilon,
323
- residual_in_fp32=False,
324
- prenorm=False,
325
- is_rms_norm=False,
326
- return_dmask=False,
327
- ):
328
- x0 = maybe_align(x0.contiguous(), 16)
329
- residual = maybe_align(residual.contiguous(), 16) if residual is not None else None
330
- gamma = maybe_align(gamma.contiguous(), 16)
331
- beta = maybe_align(beta.contiguous(), 16) if beta is not None else None
332
- rowscale = maybe_align(rowscale.contiguous(), 16) if rowscale is not None else None
333
- colscale = maybe_align(colscale.contiguous(), 16) if colscale is not None else None
334
- zmat, xmat, dmask, mu, rsigma = _dropout_add_layer_norm_forward(
335
- x0,
336
- residual,
337
- gamma,
338
- beta,
339
- rowscale,
340
- colscale,
341
- dropout_p,
342
- epsilon,
343
- residual_in_fp32,
344
- is_rms_norm,
345
- )
346
- # Only need to save x0 if we need to compute gradient wrt colscale
347
- x0_saved = x0 if colscale is not None else None
348
- ctx.save_for_backward(
349
- xmat.view(x0.shape), x0_saved, dmask, gamma, mu, rsigma, rowscale, colscale
350
- )
351
- ctx.prenorm = prenorm
352
- ctx.dropout_p = dropout_p
353
- ctx.has_residual = residual is not None
354
- ctx.is_rms_norm = is_rms_norm
355
- ctx.has_beta = beta is not None
356
- if not return_dmask:
357
- return (
358
- zmat.view(x0.shape) if not prenorm else (zmat.view(x0.shape), xmat.view(x0.shape))
359
- )
360
- else:
361
- dmask = (
362
- dmask.view(x0.shape)
363
- if dropout_p > 0.0
364
- else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
365
- )
366
- ctx.mark_non_differentiable(dmask)
367
- return (
368
- (zmat.view(x0.shape), dmask)
369
- if not prenorm
370
- else (zmat.view(x0.shape), xmat.view(x0.shape), dmask)
371
- )
372
-
373
- @staticmethod
374
- def backward(ctx, dz, *args):
375
- # assert dz.is_contiguous()
376
- dz = maybe_align(dz.contiguous(), 16) # this happens!
377
- dx = maybe_align(args[0].contiguous(), 16) if ctx.prenorm else None
378
- x, x0, dmask, gamma, mu, rsigma, rowscale, colscale = ctx.saved_tensors
379
- # x0 is None if colscale is None
380
- dropout_p = ctx.dropout_p
381
- has_residual = ctx.has_residual
382
- dx0mat, dresidualmat, dgamma, dbeta, *rest = _dropout_add_layer_norm_backward(
383
- dz,
384
- dx,
385
- x,
386
- x0,
387
- dmask,
388
- mu,
389
- rsigma,
390
- gamma,
391
- rowscale,
392
- colscale,
393
- dropout_p,
394
- has_residual,
395
- ctx.is_rms_norm,
396
- )
397
- dx0 = dx0mat.view(x.shape)
398
- dresidual = dresidualmat.view(x.shape) if dresidualmat is not None else None
399
- dcolscale = rest[0] if colscale is not None else None
400
- return (
401
- dx0,
402
- dresidual,
403
- dgamma,
404
- dbeta if ctx.has_beta else None,
405
- None,
406
- dcolscale,
407
- None,
408
- None,
409
- None,
410
- None,
411
- None,
412
- None,
413
- )
414
-
415
-
416
- class DropoutAddLayerNormSubsetFn(torch.autograd.Function):
417
- @staticmethod
418
- def forward(
419
- ctx,
420
- x0,
421
- residual,
422
- gamma,
423
- beta,
424
- colscale,
425
- x0_subset,
426
- out_subset,
427
- dropout_p,
428
- epsilon,
429
- rowscale_const,
430
- out_numrows,
431
- residual_in_fp32=False,
432
- prenorm=False,
433
- is_rms_norm=False,
434
- return_dmask=False,
435
- ):
436
- x0 = maybe_align(x0.contiguous(), 16)
437
- residual = maybe_align(residual.contiguous(), 16) if residual is not None else None
438
- gamma = maybe_align(gamma.contiguous(), 16)
439
- beta = maybe_align(beta.contiguous(), 16) if beta is not None else None
440
- colscale = maybe_align(colscale.contiguous(), 16) if colscale is not None else None
441
- zmat, xmat, dmask, mu, rsigma = _dropout_add_layer_norm_subset_forward(
442
- x0,
443
- residual,
444
- gamma,
445
- beta,
446
- colscale,
447
- x0_subset,
448
- out_subset,
449
- dropout_p,
450
- epsilon,
451
- rowscale_const,
452
- out_numrows,
453
- residual_in_fp32,
454
- is_rms_norm,
455
- )
456
- # Only need to save x0 if we need to compute gradient wrt colscale
457
- x0_saved = x0 if colscale is not None else None
458
- x_shape = (-1, *x0.shape[1:])
459
- ctx.save_for_backward(
460
- xmat.view(x_shape), x0_saved, dmask, gamma, mu, rsigma, colscale, x0_subset, out_subset
461
- )
462
- ctx.prenorm = prenorm
463
- ctx.dropout_p = dropout_p
464
- ctx.rowscale_const = rowscale_const
465
- ctx.x0_numrows = x0.shape[:-1].numel()
466
- ctx.has_residual = residual is not None
467
- ctx.is_rms_norm = is_rms_norm
468
- ctx.has_beta = beta is not None
469
- z_shape = (-1, *x0.shape[1:])
470
- if not return_dmask:
471
- return zmat.view(z_shape) if not prenorm else (zmat.view(z_shape), xmat.view(x0.shape))
472
- else:
473
- z = zmat.view(z_shape)
474
- dmask = (
475
- dmask.view(x0.shape)
476
- if dropout_p > 0.0
477
- else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
478
- )
479
- ctx.mark_non_differentiable(dmask)
480
- return (z, dmask) if not prenorm else (z, xmat.view(x_shape), dmask)
481
-
482
- @staticmethod
483
- def backward(ctx, dz, *args):
484
- # assert dz.is_contiguous()
485
- dz = maybe_align(dz.contiguous(), 16) # this happens!
486
- dx = maybe_align(args[0].contiguous(), 16) if ctx.prenorm else None
487
- x, x0, dmask, gamma, mu, rsigma, colscale, x0_subset, out_subset = ctx.saved_tensors
488
- # x0 is None if colscale is None
489
- dropout_p = ctx.dropout_p
490
- has_residual = ctx.has_residual
491
- dx0mat, dresidualmat, dgamma, dbeta, *rest = _dropout_add_layer_norm_subset_backward(
492
- dz,
493
- dx,
494
- x,
495
- x0,
496
- dmask,
497
- mu,
498
- rsigma,
499
- gamma,
500
- colscale,
501
- x0_subset,
502
- out_subset,
503
- dropout_p,
504
- ctx.rowscale_const,
505
- ctx.x0_numrows,
506
- has_residual,
507
- ctx.is_rms_norm,
508
- )
509
- dx0 = dx0mat.view(-1, *x.shape[1:])
510
- dresidual = dresidualmat.view(x.shape) if dresidualmat is not None else None
511
- dcolscale = rest[0] if colscale is not None else None
512
- return (
513
- dx0,
514
- dresidual,
515
- dgamma,
516
- dbeta if ctx.has_beta else None,
517
- dcolscale,
518
- None,
519
- None,
520
- None,
521
- None,
522
- None,
523
- None,
524
- None,
525
- None,
526
- None,
527
- None,
528
- )
529
-
530
-
531
- class DropoutAddLayerNormParallelResidualFn(torch.autograd.Function):
532
- @staticmethod
533
- def forward(
534
- ctx,
535
- x0,
536
- x1,
537
- residual,
538
- gamma0,
539
- beta0,
540
- gamma1,
541
- beta1,
542
- dropout_p,
543
- epsilon,
544
- residual_in_fp32=False,
545
- prenorm=False,
546
- is_rms_norm=False,
547
- return_dmask=False,
548
- ):
549
- x0 = maybe_align(x0.contiguous(), 16)
550
- x1 = maybe_align(x1.contiguous(), 16) if x1 is not None else None
551
- residual = maybe_align(residual.contiguous(), 16) if residual is not None else None
552
- gamma0 = maybe_align(gamma0.contiguous(), 16)
553
- beta0 = maybe_align(beta0.contiguous(), 16) if beta0 is not None else None
554
- gamma1 = maybe_align(gamma1.contiguous(), 16) if gamma1 is not None else None
555
- beta1 = maybe_align(beta1.contiguous(), 16) if beta1 is not None else None
556
- (
557
- z0mat,
558
- z1mat,
559
- xmat,
560
- dmask0,
561
- dmask1,
562
- mu,
563
- rsigma,
564
- ) = _dropout_add_layer_norm_parallel_residual_forward(
565
- x0,
566
- x1,
567
- residual,
568
- gamma0,
569
- beta0,
570
- gamma1,
571
- beta1,
572
- dropout_p,
573
- epsilon,
574
- residual_in_fp32,
575
- is_rms_norm,
576
- )
577
- ctx.save_for_backward(xmat.view(x0.shape), dmask0, dmask1, gamma0, gamma1, mu, rsigma)
578
- ctx.prenorm = prenorm
579
- ctx.dropout_p = dropout_p
580
- ctx.has_x1 = x1 is not None
581
- ctx.has_residual = residual is not None
582
- ctx.is_rms_norm = is_rms_norm
583
- ctx.has_beta = beta0 is not None
584
- z = (z0mat.view(x0.shape), z1mat.view(x0.shape) if z1mat is not None else None)
585
- if not return_dmask:
586
- return z if not prenorm else (*z, xmat.view(x0.shape))
587
- else:
588
- dmask0 = (
589
- dmask0.view(x0.shape)
590
- if dropout_p > 0.0
591
- else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
592
- )
593
- dmask1 = (
594
- dmask1.view(x0.shape)
595
- if dropout_p > 0.0 and x1 is not None
596
- else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
597
- )
598
- ctx.mark_non_differentiable(dmask0)
599
- ctx.mark_non_differentiable(dmask1)
600
- return (
601
- (*z, dmask0, dmask1) if not prenorm else (*z, xmat.view(x0.shape), dmask0, dmask1)
602
- )
603
-
604
- @staticmethod
605
- def backward(ctx, dz0, dz1, *args):
606
- dz0 = maybe_align(dz0.contiguous(), 16) # this happens!
607
- dz1 = maybe_align(dz1.contiguous(), 16) if dz1 is not None else None
608
- dx = maybe_align(args[0].contiguous(), 16) if ctx.prenorm else None
609
- x, dmask0, dmask1, gamma0, gamma1, mu, rsigma = ctx.saved_tensors
610
- dropout_p = ctx.dropout_p
611
- has_x1 = ctx.has_x1
612
- has_residual = ctx.has_residual
613
- (
614
- dx0mat,
615
- dx1mat,
616
- dresidualmat,
617
- dgamma0,
618
- dbeta0,
619
- dgamma1,
620
- dbeta1,
621
- ) = _dropout_add_layer_norm_parallel_residual_backward(
622
- dz0,
623
- dz1,
624
- dx,
625
- x,
626
- dmask0,
627
- dmask1,
628
- mu,
629
- rsigma,
630
- gamma0,
631
- gamma1,
632
- dropout_p,
633
- has_x1,
634
- has_residual,
635
- ctx.is_rms_norm,
636
- )
637
- dx0 = dx0mat.view(x.shape)
638
- dx1 = dx1mat.view(x.shape) if dx1mat is not None else None
639
- dresidual = dresidualmat.view(x.shape) if dresidualmat is not None else None
640
- return (
641
- dx0,
642
- dx1,
643
- dresidual,
644
- dgamma0,
645
- dbeta0 if ctx.has_beta else None,
646
- dgamma1,
647
- dbeta1 if ctx.has_beta else None,
648
- None,
649
- None,
650
- None,
651
- None,
652
- None,
653
- None,
654
- )
655
-
656
-
657
- def layer_norm(x, weight, bias, epsilon):
658
- return DropoutAddLayerNormFn.apply(x, None, weight, bias, None, None, 0.0, epsilon, False)
659
-
660
-
661
- def dropout_add_layer_norm(
662
- x0,
663
- residual,
664
- weight,
665
- bias,
666
- dropout_p,
667
- epsilon,
668
- rowscale=None,
669
- layerscale=None,
670
- prenorm=False,
671
- residual_in_fp32=False,
672
- return_dropout_mask=False,
673
- ):
674
- """residual_in_fp32 only has an effect if residual is None.
675
- Otherwise residual dtype is residual.dtype.
676
- """
677
- return DropoutAddLayerNormFn.apply(
678
- x0,
679
- residual,
680
- weight,
681
- bias,
682
- rowscale,
683
- layerscale,
684
- dropout_p,
685
- epsilon,
686
- residual_in_fp32,
687
- prenorm,
688
- False,
689
- return_dropout_mask,
690
- )
691
-
692
-
693
- def dropout_add_layer_norm_subset(
694
- x0,
695
- residual,
696
- weight,
697
- bias,
698
- dropout_p,
699
- epsilon,
700
- layerscale=None,
701
- x0_subset=None,
702
- out_subset=None,
703
- rowscale_const=1.0,
704
- out_numrows=0,
705
- prenorm=False,
706
- residual_in_fp32=False,
707
- return_dropout_mask=False,
708
- ):
709
- """residual_in_fp32 only has an effect if residual is None.
710
- Otherwise residual dtype is residual.dtype.
711
- """
712
- return DropoutAddLayerNormSubsetFn.apply(
713
- x0,
714
- residual,
715
- weight,
716
- bias,
717
- layerscale,
718
- x0_subset,
719
- out_subset,
720
- dropout_p,
721
- epsilon,
722
- rowscale_const,
723
- out_numrows,
724
- residual_in_fp32,
725
- prenorm,
726
- False,
727
- return_dropout_mask,
728
- )
729
-
730
-
731
- def dropout_add_layer_norm_parallel_residual(
732
- x0,
733
- x1,
734
- residual,
735
- weight0,
736
- bias0,
737
- weight1,
738
- bias1,
739
- dropout_p,
740
- epsilon,
741
- prenorm=False,
742
- residual_in_fp32=False,
743
- return_dropout_mask=False,
744
- ):
745
- """residual_in_fp32 only has an effect if residual is None.
746
- Otherwise residual dtype is residual.dtype.
747
- """
748
- return DropoutAddLayerNormParallelResidualFn.apply(
749
- x0,
750
- x1,
751
- residual,
752
- weight0,
753
- bias0,
754
- weight1,
755
- bias1,
756
- dropout_p,
757
- epsilon,
758
- residual_in_fp32,
759
- prenorm,
760
- False,
761
- return_dropout_mask,
762
- )
763
-
764
-
765
- class DropoutAddLayerNorm(torch.nn.Module):
766
- def __init__(
767
- self,
768
- hidden_size,
769
- prenorm=False,
770
- p=0.0,
771
- eps=1e-5,
772
- residual_in_fp32=False,
773
- device=None,
774
- dtype=None,
775
- ):
776
- factory_kwargs = {"device": device, "dtype": dtype}
777
- super().__init__()
778
- self.prenorm = prenorm
779
- self.p = p
780
- self.eps = eps
781
- self.residual_in_fp32 = residual_in_fp32
782
- self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
783
- self.bias = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
784
- self.reset_parameters()
785
-
786
- def reset_parameters(self):
787
- init.ones_(self.weight)
788
- init.zeros_(self.bias)
789
-
790
- def forward(self, x0, residual=None):
791
- return dropout_add_layer_norm(
792
- x0,
793
- residual,
794
- self.weight,
795
- self.bias,
796
- self.p if self.training else 0.0,
797
- self.eps,
798
- prenorm=self.prenorm,
799
- residual_in_fp32=self.residual_in_fp32,
800
- )
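
For context on the deleted layer_norm.py above: dropout_add_layer_norm fuses dropout(x0) + residual followed by LayerNorm into one kernel from the dropout_layer_norm extension, and prenorm=True additionally returns the pre-normalization sum so a pre-norm block can carry it forward. A minimal sketch, assuming that extension is installed and using illustrative shapes:

import torch
from flash_attn.ops.layer_norm import dropout_add_layer_norm  # as defined in the deleted file

hidden = 1024
x0 = torch.randn(4, 512, hidden, device="cuda", dtype=torch.float16)
residual = torch.randn_like(x0)
weight = torch.ones(hidden, device="cuda", dtype=torch.float16)
bias = torch.zeros(hidden, device="cuda", dtype=torch.float16)

# out = LayerNorm(dropout(x0) + residual); with prenorm=True the un-normalized
# sum is returned as well, to be used as the residual stream of the next block.
out, pre_norm = dropout_add_layer_norm(
    x0, residual, weight, bias, 0.1, 1e-5, prenorm=True
)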
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/rms_norm.py DELETED
@@ -1,174 +0,0 @@
- # Copyright (c) 2022, Tri Dao.
- # Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py
-
- import torch
- from torch.nn import init
-
- from flash_attn.ops.layer_norm import (
-     DropoutAddLayerNormFn,
-     DropoutAddLayerNormParallelResidualFn,
-     DropoutAddLayerNormSubsetFn,
- )
-
-
- def rms_norm(x, weight, epsilon):
-     return DropoutAddLayerNormFn.apply(
-         x, None, weight, None, None, None, 0.0, epsilon, False, False, True
-     )
-
-
- def dropout_add_rms_norm(
-     x0,
-     residual,
-     weight,
-     bias,
-     dropout_p,
-     epsilon,
-     rowscale=None,
-     layerscale=None,
-     prenorm=False,
-     residual_in_fp32=False,
-     return_dropout_mask=False,
- ):
-     """residual_in_fp32 only has an effect if residual is None.
-     Otherwise residual dtype is residual.dtype.
-     """
-     return DropoutAddLayerNormFn.apply(
-         x0,
-         residual,
-         weight,
-         bias,
-         rowscale,
-         layerscale,
-         dropout_p,
-         epsilon,
-         residual_in_fp32,
-         prenorm,
-         True,
-         return_dropout_mask,
-     )
-
-
- def dropout_add_rms_norm_subset(
-     x0,
-     residual,
-     weight,
-     bias,
-     dropout_p,
-     epsilon,
-     layerscale=None,
-     x0_subset=None,
-     out_subset=None,
-     rowscale_const=1.0,
-     out_numrows=0,
-     prenorm=False,
-     residual_in_fp32=False,
-     return_dropout_mask=False,
- ):
-     """residual_in_fp32 only has an effect if residual is None.
-     Otherwise residual dtype is residual.dtype.
-     """
-     return DropoutAddLayerNormSubsetFn.apply(
-         x0,
-         residual,
-         weight,
-         bias,
-         layerscale,
-         x0_subset,
-         out_subset,
-         dropout_p,
-         epsilon,
-         rowscale_const,
-         out_numrows,
-         residual_in_fp32,
-         prenorm,
-         True,
-         return_dropout_mask,
-     )
-
-
- def dropout_add_rms_norm_parallel_residual(
-     x0,
-     x1,
-     residual,
-     weight0,
-     bias0,
-     weight1,
-     bias1,
-     dropout_p,
-     epsilon,
-     prenorm=False,
-     residual_in_fp32=False,
-     return_dropout_mask=False,
- ):
-     """residual_in_fp32 only has an effect if residual is None.
-     Otherwise residual dtype is residual.dtype.
-     """
-     return DropoutAddLayerNormParallelResidualFn.apply(
-         x0,
-         x1,
-         residual,
-         weight0,
-         bias0,
-         weight1,
-         bias1,
-         dropout_p,
-         epsilon,
-         residual_in_fp32,
-         prenorm,
-         True,
-         return_dropout_mask,
-     )
-
-
- class RMSNorm(torch.nn.Module):
-     def __init__(self, hidden_size, eps=1e-5, device=None, dtype=None):
-         factory_kwargs = {"device": device, "dtype": dtype}
-         super().__init__()
-         self.eps = eps
-         self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
-         self.register_parameter("bias", None)
-         self.reset_parameters()
-
-     def reset_parameters(self):
-         init.ones_(self.weight)
-
-     def forward(self, x):
-         return rms_norm(x, self.weight, self.eps)
-
-
- class DropoutAddRMSNorm(torch.nn.Module):
-     def __init__(
-         self,
-         hidden_size,
-         prenorm=False,
-         p=0.0,
-         eps=1e-5,
-         residual_in_fp32=False,
-         device=None,
-         dtype=None,
-     ):
-         factory_kwargs = {"device": device, "dtype": dtype}
-         super().__init__()
-         self.prenorm = prenorm
-         self.p = p
-         self.eps = eps
-         self.residual_in_fp32 = residual_in_fp32
-         self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
-         self.register_parameter("bias", None)
-         self.reset_parameters()
-
-     def reset_parameters(self):
-         init.ones_(self.weight)
-
-     def forward(self, x0, residual=None):
-         return dropout_add_rms_norm(
-             x0,
-             residual,
-             self.weight,
-             None,
-             self.p if self.training else 0.0,
-             self.eps,
-             prenorm=self.prenorm,
-             residual_in_fp32=self.residual_in_fp32,
-         )
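
A short usage sketch of the rms_norm helpers deleted above. Tensor shapes and the dropout probability are illustrative, and the functions dispatch to the fused CUDA kernels, so a GPU build of the extension is assumed; with prenorm=True the fused call also returns the updated residual stream.

    import torch
    from flash_attn.ops.rms_norm import RMSNorm, dropout_add_rms_norm

    hidden = 2048
    norm = RMSNorm(hidden, eps=1e-5).to(device="cuda", dtype=torch.float16)
    x = torch.randn(4, 512, hidden, device="cuda", dtype=torch.float16)
    y = norm(x)  # plain RMSNorm: x * rstd * weight, no mean subtraction, no bias

    # Fused dropout + residual add + RMSNorm; residual=None on the first layer
    out, residual = dropout_add_rms_norm(
        x, None, norm.weight, None, 0.1, 1e-5,
        prenorm=True, residual_in_fp32=True,
    )
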
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/__init__.py DELETED
@@ -1 +0,0 @@
-
 
 
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/cross_entropy.py DELETED
@@ -1,330 +0,0 @@
1
- # Copyright (c) 2023, Tri Dao.
2
-
3
- from typing import Tuple, Optional, Union
4
-
5
- import torch
6
- import torch.nn.functional as F
7
-
8
- import triton
9
- import triton.language as tl
10
-
11
- # `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for
12
- # `_all_gather_base` and `_reduce_scatter_base`. They require the most recent
13
- # version of PyTorch. The following 2 lines are for backward compatibility with
14
- # older PyTorch.
15
- if "all_gather_into_tensor" not in dir(torch.distributed):
16
- torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
17
-
18
-
19
- @triton.heuristics(
20
- {
21
- "HAS_SMOOTHING": lambda args: args["smoothing"] > 0.0,
22
- }
23
- )
24
- @triton.jit
25
- def cross_entropy_fwd_kernel(
26
- loss_ptr, # data ptrs
27
- lse_ptr,
28
- z_loss_ptr,
29
- logits_ptr,
30
- labels_ptr,
31
- smoothing,
32
- logit_scale,
33
- lse_square_scale,
34
- ignore_index,
35
- total_classes,
36
- class_start_idx, # Useful for tensor parallel when each rank only has a subset of classes
37
- n_cols, # shapes
38
- logits_row_stride, # strides
39
- BLOCK_SIZE: tl.constexpr,
40
- HAS_SMOOTHING: tl.constexpr,
41
- # if SPLIT (e.g. tensor parallel), don't include the LSE in the loss since it's not the final LSE
42
- SPLIT: tl.constexpr,
43
- PRECOMPUTED_LSE: tl.constexpr, # If LSE is already computed (also no smoothing and logit_scale == 1.0)
44
- ):
45
- row_idx = tl.program_id(0)
46
- logits_ptr = logits_ptr + row_idx * logits_row_stride.to(tl.int64)
47
- sum_logits = 0.0 # For smoothing
48
- if not PRECOMPUTED_LSE:
49
- # Statistics for online softmax
50
- m_i = -float("inf")
51
- l_i = 0.0
52
- for col_offset in range(0, n_cols, BLOCK_SIZE):
53
- cols = col_offset + tl.arange(0, BLOCK_SIZE)
54
- logits = tl.load(logits_ptr + cols, mask=cols < n_cols, other=-float("inf")).to(
55
- tl.float32
56
- ) * logit_scale
57
- if HAS_SMOOTHING:
58
- sum_logits += tl.sum(tl.where(cols < n_cols, logits, 0.0))
59
- m_i_new = tl.maximum(m_i, tl.max(logits))
60
- l_i = tl.exp(m_i - m_i_new) * l_i + tl.sum(tl.exp(logits - m_i_new))
61
- m_i = m_i_new
62
- lse = tl.log(l_i) + m_i
63
- tl.store(lse_ptr + row_idx, lse)
64
- else:
65
- lse = tl.load(lse_ptr + row_idx)
66
- label_idx = tl.load(labels_ptr + row_idx)
67
- if label_idx == ignore_index:
68
- loss = 0.0
69
- z_loss = 0.0
70
- else:
71
- label_idx -= class_start_idx
72
- if label_idx >= 0 and label_idx < n_cols:
73
- logits_label = tl.load(logits_ptr + label_idx) * logit_scale
74
- if HAS_SMOOTHING:
75
- loss = (
76
- (lse if not SPLIT else 0.0)
77
- - smoothing * sum_logits / total_classes
78
- - (1 - smoothing) * logits_label
79
- )
80
- else:
81
- loss = (lse if not SPLIT else 0.0) - logits_label
82
- else:
83
- # If label is out of bounds, we set the CE loss to 0.0. But we still want the smoothing loss
84
- if HAS_SMOOTHING:
85
- loss = smoothing * ((lse if not SPLIT else 0.0) - sum_logits / total_classes)
86
- else:
87
- loss = 0.0
88
- if not SPLIT:
89
- z_loss = lse_square_scale * lse * lse
90
- loss += z_loss
91
- else:
92
- z_loss = 0.0
93
- tl.store(loss_ptr + row_idx, loss)
94
- if not SPLIT:
95
- tl.store(z_loss_ptr + row_idx, z_loss)
96
-
97
-
98
- @triton.heuristics(
99
- {
100
- "HAS_SMOOTHING": lambda args: args["smoothing"] > 0.0,
101
- }
102
- )
103
- @triton.jit
104
- def cross_entropy_bwd_kernel(
105
- dlogits_ptr, # data ptrs
106
- dloss_ptr,
107
- logits_ptr,
108
- lse_ptr,
109
- labels_ptr,
110
- smoothing,
111
- logit_scale,
112
- lse_square_scale,
113
- ignore_index,
114
- total_classes,
115
- class_start_idx, # Useful for tensor parallel when each rank only has a subset of classes
116
- n_cols, # shapes
117
- logits_row_stride, # strides
118
- dlogits_row_stride,
119
- dloss_row_stride,
120
- BLOCK_SIZE: tl.constexpr,
121
- HAS_SMOOTHING: tl.constexpr,
122
- ):
123
- row_idx = tl.program_id(0)
124
- col_block_idx = tl.program_id(1)
125
- logits_ptr = logits_ptr + row_idx * logits_row_stride.to(tl.int64)
126
- dlogits_ptr = dlogits_ptr + row_idx * dlogits_row_stride.to(tl.int64)
127
- col_offsets = col_block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
128
- label_idx = tl.load(labels_ptr + row_idx)
129
- if label_idx != ignore_index:
130
- dloss = tl.load(dloss_ptr + row_idx * dloss_row_stride)
131
- else:
132
- dloss = 0.0
133
- logits = tl.load(logits_ptr + col_offsets, mask=col_offsets < n_cols, other=-float("inf")).to(
134
- tl.float32
135
- ) * logit_scale
136
- lse = tl.load(lse_ptr + row_idx)
137
- probs = tl.exp(logits - lse)
138
- probs += 2.0 * lse_square_scale * lse * probs
139
- label_idx -= class_start_idx
140
- if HAS_SMOOTHING:
141
- smooth_positive = 1.0 - smoothing
142
- smooth_negative = smoothing / total_classes
143
- probs = tl.where(col_offsets == label_idx, probs - smooth_positive, probs) - smooth_negative
144
- else:
145
- probs = tl.where(col_offsets == label_idx, probs - 1.0, probs)
146
- tl.store(dlogits_ptr + col_offsets, (dloss * logit_scale) * probs, mask=col_offsets < n_cols)
147
-
148
-
149
- class CrossEntropyLoss(torch.autograd.Function):
150
-
151
- @staticmethod
152
- def forward(
153
- ctx,
154
- logits,
155
- labels,
156
- precomputed_lse=None,
157
- smoothing=0.0,
158
- logit_scale=1.0,
159
- lse_square_scale=0.0,
160
- ignore_index=-100,
161
- inplace_backward=False,
162
- process_group=None,
163
- ):
164
- # For some reason Triton generates wrong code when labels has dtype long and its address
165
- # is not aligned to 16 bytes. The ld.global.b64 seems to load the wrong label index.
166
- if labels.dtype == torch.long and labels.data_ptr() % 16 != 0:
167
- labels = F.pad(labels, (0, 1))[..., :-1]
168
- assert labels.data_ptr() % 16 == 0
169
- assert logit_scale > 0.0
170
- n_rows, n_cols = logits.shape
171
- assert labels.shape == (n_rows,)
172
- world_size = 1 if process_group is None else torch.distributed.get_world_size(process_group)
173
- total_classes = world_size * n_cols
174
- rank = 0 if process_group is None else torch.distributed.get_rank(process_group)
175
- class_start_idx = rank * n_cols
176
- use_precomputed_lse = precomputed_lse is not None and logit_scale == 1.0 and smoothing == 0.0
177
-
178
- if logits.stride(-1) != 1:
179
- logits = logits.contiguous()
180
- MAX_BLOCK_SIZE = 16 * 1024
181
- BLOCK_SIZE = min(triton.next_power_of_2(n_cols), MAX_BLOCK_SIZE)
182
- num_warps = (
183
- 4
184
- if BLOCK_SIZE < 2048
185
- else (8 if BLOCK_SIZE < 8192 else (16 if BLOCK_SIZE < 128 * 1024 else 32))
186
- )
187
- losses = torch.empty(n_rows, dtype=torch.float, device=logits.device)
188
- if use_precomputed_lse:
189
- assert precomputed_lse.shape == (n_rows,)
190
- lse = precomputed_lse.contiguous()
191
- else:
192
- lse = torch.empty(n_rows, dtype=torch.float, device=logits.device)
193
- z_losses = torch.empty(n_rows, dtype=torch.float, device=logits.device)
194
- # Need this, otherwise Triton tries to launch from cuda:0 and we get
195
- # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?)
196
- with torch.cuda.device(logits.device.index):
197
- cross_entropy_fwd_kernel[(n_rows,)](
198
- losses, # data ptrs
199
- lse,
200
- z_losses,
201
- logits,
202
- labels,
203
- smoothing,
204
- logit_scale,
205
- lse_square_scale,
206
- ignore_index,
207
- total_classes,
208
- class_start_idx,
209
- n_cols, # shapes
210
- logits.stride(0), # strides
211
- BLOCK_SIZE=BLOCK_SIZE, # constants
212
- SPLIT=world_size > 1,
213
- PRECOMPUTED_LSE=use_precomputed_lse,
214
- num_warps=num_warps,
215
- )
216
-
217
- if world_size > 1:
218
- # If there's no smoothing, if labels are in the vocab of this partition, losses contains
219
- # - predicted logit, and 0 otherwise.
220
- # If there's smoothing=0.1, for labels in the vocab of this partition, losses contains
221
- # -0.9 * predicted logit - 0.1 * sum logit / total_classes.
222
- # For labels not in the vocab of this partition, losses contains
223
- # -0.1 * sum logit / total_classes.
224
- if world_size > 1:
225
- lse_allgather = torch.empty(world_size, n_rows, dtype=lse.dtype, device=lse.device)
226
- torch.distributed.all_gather_into_tensor(lse_allgather, lse, group=process_group)
227
- handle_losses = torch.distributed.all_reduce(
228
- losses, op=torch.distributed.ReduceOp.SUM, group=process_group, async_op=True
229
- )
230
- lse = torch.logsumexp(lse_allgather, dim=0)
231
- handle_losses.wait()
232
- # After the allreduce, if there's no smoothing, the total losses are - predicted_logit,
233
- # we just have to add the (global) lse.
234
- # If there's smoothing=0.1, the total losses are
235
- # -0.9 * predicted_logit - 0.1 * sum logit / total_classes.
236
- # Again, we just have to add the (global) lse.
237
- losses += lse
238
- if lse_square_scale != 0.0:
239
- z_losses = lse_square_scale * lse.square()
240
- z_losses.masked_fill_(labels == ignore_index, 0.0)
241
- losses += z_losses
242
- else:
243
- z_losses = torch.zeros_like(losses)
244
- losses.masked_fill_(labels == ignore_index, 0.0)
245
-
246
- ctx.save_for_backward(logits, lse, labels)
247
- ctx.mark_non_differentiable(z_losses)
248
- ctx.smoothing = smoothing
249
- ctx.logit_scale = logit_scale
250
- ctx.lse_square_scale = lse_square_scale
251
- ctx.ignore_index = ignore_index
252
- ctx.total_classes = total_classes
253
- ctx.class_start_idx = class_start_idx
254
- ctx.inplace_backward = inplace_backward
255
- return losses, z_losses
256
-
257
- @staticmethod
258
- def backward(ctx, grad_losses, grad_z_losses):
259
- del grad_z_losses # z_losses are only for logging.
260
-
261
- logits, lse, labels = ctx.saved_tensors
262
- dlogits = logits if ctx.inplace_backward else torch.empty_like(logits)
263
- n_rows, n_cols = logits.shape
264
- BLOCK_SIZE = min(triton.next_power_of_2(n_cols), 4 * 1024)
265
- num_warps = 4 if BLOCK_SIZE < 2048 else (8 if BLOCK_SIZE < 8192 else 16)
266
- grid = lambda META: (n_rows, triton.cdiv(n_cols, META["BLOCK_SIZE"])) # noqa
267
- # Need this, otherwise Triton tries to launch from cuda:0 and we get
268
- # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?)
269
- with torch.cuda.device(logits.device.index):
270
- cross_entropy_bwd_kernel[grid](
271
- dlogits, # data ptrs
272
- grad_losses,
273
- logits,
274
- lse,
275
- labels,
276
- ctx.smoothing,
277
- ctx.logit_scale,
278
- ctx.lse_square_scale,
279
- ctx.ignore_index,
280
- ctx.total_classes,
281
- ctx.class_start_idx,
282
- n_cols, # shapes
283
- logits.stride(0), # strides
284
- dlogits.stride(0),
285
- grad_losses.stride(0),
286
- BLOCK_SIZE=BLOCK_SIZE, # constants
287
- num_warps=num_warps,
288
- )
289
- return dlogits, None, None, None, None, None, None, None, None, None
290
-
291
-
292
- def cross_entropy_loss(
293
- logits: torch.Tensor,
294
- labels: torch.Tensor,
295
- precomputed_lse: Optional[torch.Tensor] = None,
296
- label_smoothing: float = 0.0,
297
- logit_scale: float = 1.0,
298
- lse_square_scale: float = 0.0,
299
- ignore_index=-100,
300
- inplace_backward: bool = False,
301
- process_group=None,
302
- ) -> Tuple[torch.Tensor, torch.Tensor]:
303
- """
304
- Arguments:
305
- logits: (batch, vocab_size)
306
- labels: (batch,)
307
- label_smoothing: float
308
- logit_scale: float. Multiply logits by this scale before calculating the loss.
309
- lse_square_scale: float. If > 0, we add lse_square_scale * lse(logits) ^ 2 to the loss.
310
- This is also referred to as "z-loss".
311
- ignore_index: int. If labels == ignore_index, the loss is set to 0.0.
312
- inplace_backward: bool. If True, we do the backward pass in-place by modifying the logits.
313
- This saves memory.
314
- process_group: if not None, we're doing Tensor Parallel: each process is responsible for
315
- one part of the vocab. The loss will be aggregated across processes.
316
- Returns:
317
- losses: (batch,), float
318
- z_losses: (batch,), float
319
- """
320
- return CrossEntropyLoss.apply(
321
- logits,
322
- labels,
323
- precomputed_lse,
324
- label_smoothing,
325
- logit_scale,
326
- lse_square_scale,
327
- ignore_index,
328
- inplace_backward,
329
- process_group,
330
- )
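
The deleted cross_entropy.py exposes cross_entropy_loss as a per-row cross entropy with optional label smoothing, logit scaling, z-loss, and tensor-parallel vocab splitting. Below is a minimal, hypothetical single-GPU call; the batch size, vocab size, and tolerance are illustrative, and with no smoothing or z-loss the result should agree with the PyTorch reference.

    import torch
    import torch.nn.functional as F
    from flash_attn.ops.triton.cross_entropy import cross_entropy_loss

    logits = torch.randn(32, 50304, device="cuda", dtype=torch.float32, requires_grad=True)
    labels = torch.randint(0, 50304, (32,), device="cuda")

    losses, z_losses = cross_entropy_loss(logits, labels)  # per-row losses; z_losses are zero here
    losses.mean().backward()

    # Sanity check against the PyTorch reference (valid since smoothing=0 and lse_square_scale=0)
    ref = F.cross_entropy(logits.detach(), labels, reduction="none")
    torch.testing.assert_close(losses.detach(), ref, atol=1e-4, rtol=1e-4)
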
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/k_activations.py DELETED
@@ -1,162 +0,0 @@
- # Adapted from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_activations.py
- # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
- #
- # This source code is licensed under the BSD license found in the
- # LICENSE file in the root directory of this source tree.
-
- import math
- from enum import Enum
- from typing import Optional
-
- import triton
- import triton.language as tl
-
- _sqrt2pi = math.sqrt(2.0 / math.pi)
- _sqrt1_2 = math.sqrt(1.0 / 2)
- _gaussian_pdf_normalization = 1.0 / math.sqrt(2 * math.pi)
-
-
- class Activation(str, Enum):
-     SquaredReLU = "squared_relu"
-     GeLU = "gelu"
-     GeLUApprox = "gelu_approx"
-     LeakyReLU = "leaky_relu"
-     ReLU = "relu"
-
-
- def get_triton_activation_kernel(activation: Optional[Activation]):
-     return (
-         {
-             Activation.ReLU: relu,
-             Activation.LeakyReLU: leaky_relu,
-             Activation.GeLU: gelu,
-             Activation.GeLUApprox: gelu_approx,
-             Activation.SquaredReLU: squared_relu,
-         }[activation]
-         if activation
-         else None
-     )
-
-
- def get_triton_activation_bwd_kernel(activation: Optional[Activation]):
-     return (
-         {
-             Activation.ReLU: relu_grad,
-             Activation.LeakyReLU: leaky_relu_grad,
-             Activation.GeLU: gelu_grad,
-             Activation.GeLUApprox: gelu_approx_grad,
-             Activation.SquaredReLU: squared_relu_grad,
-         }[activation]
-         if activation
-         else None
-     )
-
-
- @triton.jit
- def tanh(x):
-     # Tanh is just a scaled sigmoid
-     return 2 * tl.sigmoid(2 * x) - 1
-
-
- @triton.jit
- def cosh(x):
-     exp_x = tl.exp(x)
-     return (exp_x + 1.0 / exp_x) * 0.5
-
-
- # a Triton implementation of the most used activations
- # See for instance http://arxiv.org/abs/1606.08415 for an overview
-
- # ReLU
- @triton.jit
- def relu(x):
-     """
-     ReLU_ activation function
-
-     .. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html
-     """
-     zero = 0.0
-     return tl.where(x >= 0, x, zero.to(x.dtype))
-
-
- @triton.jit
- def relu_grad(x):
-     # ReLU is different from other activations
-     # in that it does not require the input to retrospectively compute its gradient
-     # here the input is the downstream gradient, and we return the upstream gradient directly
-     zero = 0.0
-     one = 1.0
-     return tl.where(x >= 0, one.to(x.dtype), zero.to(x.dtype))
-
-
- @triton.jit
- def squared_relu(x):
-     """
-     Squared ReLU activation, as proposed in the Primer_ paper.
-
-     .. _Primer: https://arxiv.org/abs/2109.08668
-     """
-     x_ = relu(x)
-     return (x_ * x_).to(x.dtype)
-
-
- @triton.jit
- def squared_relu_grad(x):
-     return tl.where(x >= 0, 2.0 * x, 0.0)
-
-
- # Leaky ReLU
- @triton.jit
- def leaky_relu(x):
-     """
-     LeakyReLU_ activation
-
-     .. _LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html
-     """
-     scale = 0.01 + 0.0
-     scale = scale.to(x.dtype)
-     return tl.where(x >= 0, x, scale * x)
-
-
- @triton.jit
- def leaky_relu_grad(x):
-     min_grad = 0.01
-     max_grad = 1
-
-     min_grad = min_grad.to(x.dtype)
-     max_grad = max_grad.to(x.dtype)
-
-     return tl.where(x >= 0, max_grad, min_grad)
-
-
- @triton.jit
- def gelu(x):
-     """Gaussian Error Linear Unit (GELU)"""
-     return x * 0.5 * (1.0 + tl.libdevice.erf(x * _sqrt1_2))
-
-
- @triton.jit
- def gelu_grad(x):
-     cdf = 0.5 * (1.0 + tl.libdevice.erf(x * _sqrt1_2))
-     pdf = tl.exp(-0.5 * x * x) * _gaussian_pdf_normalization
-     return cdf + x * pdf
-
-
- @triton.jit
- def gelu_approx(x):
-     """
-     GeLU_ activation - Gaussian error linear unit, with tanh approximation
-
-     .. _GeLU: https://arxiv.org/pdf/1606.08415.pdf
-     """
-     return 0.5 * x * (1.0 + tanh(_sqrt2pi * x * (1.0 + 0.044715 * x * x)))
-
-
- @triton.jit
- def gelu_approx_grad(x):
-     # CREDITS: Fast implementation proposed in
-     # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/fused_bias_gelu.py#L30
-     tanh_out = tanh(0.79788456 * x * (1 + 0.044715 * x * x))
-     return 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
-         1 + tanh_out
-     )
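
The GELU helpers deleted above implement both the erf form and the tanh approximation; they are @triton.jit device functions meant to be inlined into other Triton kernels, not called from Python. As a host-side sanity check (a sketch, not part of the deleted module), the tanh-approximate formula matches PyTorch's built-in approximation:

    import math
    import torch
    import torch.nn.functional as F

    def gelu_approx_ref(x: torch.Tensor) -> torch.Tensor:
        # Same formula as the Triton gelu_approx above:
        # 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x**3)))
        c = math.sqrt(2.0 / math.pi)
        return 0.5 * x * (1.0 + torch.tanh(c * x * (1.0 + 0.044715 * x * x)))

    x = torch.randn(1024)
    torch.testing.assert_close(gelu_approx_ref(x), F.gelu(x, approximate="tanh"), atol=1e-6, rtol=1e-5)
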
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/layer_norm.py DELETED
@@ -1,1252 +0,0 @@
1
- # Copyright (c) 2024, Tri Dao.
2
- # Implement dropout + residual + layer_norm / rms_norm.
3
-
4
- # Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html
5
- # For the backward pass, we keep weight_grad and bias_grad in registers and accumulate.
6
- # This is faster for dimensions up to 8k, but after that it's much slower due to register spilling.
7
- # The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine.
8
-
9
- import math
10
- from typing import Optional, List
11
-
12
- import torch
13
- import torch.nn.functional as F
14
- from torch import Tensor
15
-
16
- import triton
17
- import triton.language as tl
18
-
19
- from flash_attn.utils.torch import custom_fwd, custom_bwd
20
- from flash_attn.utils.library import triton_op
21
-
22
-
23
- def maybe_contiguous_lastdim(x):
24
- return x.contiguous() if x is not None and x.stride(-1) != 1 else x
25
-
26
-
27
- def maybe_contiguous(x):
28
- return x.contiguous() if x is not None else None
29
-
30
-
31
- def triton_autotune_configs():
32
- # Return configs with a valid warp count for the current device
33
- configs = []
34
- # Maximum threads per block is architecture-dependent in theory, but in reality all are 1024
35
- max_threads_per_block = 1024
36
- # Default to warp size 32 if not defined by device
37
- warp_size = getattr(torch.cuda.get_device_properties(torch.cuda.current_device()), "warp_size", 32)
38
- # Autotune for warp counts which are powers of 2 and do not exceed thread per block limit
39
- return [triton.Config({}, num_warps=warp_count) for warp_count in [1, 2, 4, 8, 16, 32]
40
- if warp_count * warp_size <= max_threads_per_block]
41
- # return [triton.Config({}, num_warps=8)]
42
-
43
-
44
- def layer_norm_ref(
45
- x,
46
- weight,
47
- bias,
48
- residual=None,
49
- x1=None,
50
- weight1=None,
51
- bias1=None,
52
- eps=1e-6,
53
- dropout_p=0.0,
54
- rowscale=None,
55
- prenorm=False,
56
- zero_centered_weight=False,
57
- dropout_mask=None,
58
- dropout_mask1=None,
59
- upcast=False,
60
- ):
61
- dtype = x.dtype
62
- if upcast:
63
- x = x.float()
64
- weight = weight.float()
65
- bias = bias.float() if bias is not None else None
66
- residual = residual.float() if residual is not None else residual
67
- x1 = x1.float() if x1 is not None else None
68
- weight1 = weight1.float() if weight1 is not None else None
69
- bias1 = bias1.float() if bias1 is not None else None
70
- if zero_centered_weight:
71
- weight = weight + 1.0
72
- if weight1 is not None:
73
- weight1 = weight1 + 1.0
74
- if x1 is not None:
75
- assert rowscale is None, "rowscale is not supported with parallel LayerNorm"
76
- if rowscale is not None:
77
- x = x * rowscale[..., None]
78
- if dropout_p > 0.0:
79
- if dropout_mask is not None:
80
- x = x.masked_fill(~dropout_mask, 0.0) / (1.0 - dropout_p)
81
- else:
82
- x = F.dropout(x, p=dropout_p)
83
- if x1 is not None:
84
- if dropout_mask1 is not None:
85
- x1 = x1.masked_fill(~dropout_mask1, 0.0) / (1.0 - dropout_p)
86
- else:
87
- x1 = F.dropout(x1, p=dropout_p)
88
- if x1 is not None:
89
- x = x + x1
90
- if residual is not None:
91
- x = (x + residual).to(x.dtype)
92
- out = F.layer_norm(x.to(weight.dtype), x.shape[-1:], weight=weight, bias=bias, eps=eps).to(
93
- dtype
94
- )
95
- if weight1 is None:
96
- return out if not prenorm else (out, x)
97
- else:
98
- out1 = F.layer_norm(
99
- x.to(weight1.dtype), x.shape[-1:], weight=weight1, bias=bias1, eps=eps
100
- ).to(dtype)
101
- return (out, out1) if not prenorm else (out, out1, x)
102
-
103
-
104
- def rms_norm_ref(
105
- x,
106
- weight,
107
- bias,
108
- residual=None,
109
- x1=None,
110
- weight1=None,
111
- bias1=None,
112
- eps=1e-6,
113
- dropout_p=0.0,
114
- rowscale=None,
115
- prenorm=False,
116
- zero_centered_weight=False,
117
- dropout_mask=None,
118
- dropout_mask1=None,
119
- upcast=False,
120
- ):
121
- dtype = x.dtype
122
- if upcast:
123
- x = x.float()
124
- weight = weight.float()
125
- bias = bias.float() if bias is not None else None
126
- residual = residual.float() if residual is not None else residual
127
- x1 = x1.float() if x1 is not None else None
128
- weight1 = weight1.float() if weight1 is not None else None
129
- bias1 = bias1.float() if bias1 is not None else None
130
- if zero_centered_weight:
131
- weight = weight + 1.0
132
- if weight1 is not None:
133
- weight1 = weight1 + 1.0
134
- if x1 is not None:
135
- assert rowscale is None, "rowscale is not supported with parallel LayerNorm"
136
- if rowscale is not None:
137
- x = x * rowscale[..., None]
138
- if dropout_p > 0.0:
139
- if dropout_mask is not None:
140
- x = x.masked_fill(~dropout_mask, 0.0) / (1.0 - dropout_p)
141
- else:
142
- x = F.dropout(x, p=dropout_p)
143
- if x1 is not None:
144
- if dropout_mask1 is not None:
145
- x1 = x1.masked_fill(~dropout_mask1, 0.0) / (1.0 - dropout_p)
146
- else:
147
- x1 = F.dropout(x1, p=dropout_p)
148
- if x1 is not None:
149
- x = x + x1
150
- if residual is not None:
151
- x = (x + residual).to(x.dtype)
152
- rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps)
153
- out = ((x * rstd * weight) + bias if bias is not None else (x * rstd * weight)).to(dtype)
154
- if weight1 is None:
155
- return out if not prenorm else (out, x)
156
- else:
157
- out1 = ((x * rstd * weight1) + bias1 if bias1 is not None else (x * rstd * weight1)).to(
158
- dtype
159
- )
160
- return (out, out1) if not prenorm else (out, out1, x)
161
-
162
-
163
- @triton.autotune(
164
- configs=triton_autotune_configs(),
165
- key=["N", "HAS_RESIDUAL", "STORE_RESIDUAL_OUT", "IS_RMS_NORM", "HAS_BIAS", "HAS_X1", "HAS_W1", "HAS_B1"],
166
- )
167
- # torch compile doesn't like triton.heuristics, so we set these manually when calling the kernel
168
- # @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
169
- # @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None})
170
- # @triton.heuristics({"HAS_X1": lambda args: args["X1"] is not None})
171
- # @triton.heuristics({"HAS_W1": lambda args: args["W1"] is not None})
172
- # @triton.heuristics({"HAS_B1": lambda args: args["B1"] is not None})
173
- @triton.jit
174
- def _layer_norm_fwd_1pass_kernel(
175
- X, # pointer to the input
176
- Y, # pointer to the output
177
- W, # pointer to the weights
178
- B, # pointer to the biases
179
- RESIDUAL, # pointer to the residual
180
- X1,
181
- W1,
182
- B1,
183
- Y1,
184
- RESIDUAL_OUT, # pointer to the residual
185
- ROWSCALE,
186
- SEEDS, # Dropout seeds for each row
187
- DROPOUT_MASK,
188
- DROPOUT_MASK1,
189
- Mean, # pointer to the mean
190
- Rstd, # pointer to the 1/std
191
- stride_x_row, # how much to increase the pointer when moving by 1 row
192
- stride_y_row,
193
- stride_res_row,
194
- stride_res_out_row,
195
- stride_x1_row,
196
- stride_y1_row,
197
- M, # number of rows in X
198
- N, # number of columns in X
199
- eps, # epsilon to avoid division by zero
200
- dropout_p, # Dropout probability
201
- zero_centered_weight, # If true, add 1.0 to the weight
202
- IS_RMS_NORM: tl.constexpr,
203
- BLOCK_N: tl.constexpr,
204
- HAS_RESIDUAL: tl.constexpr,
205
- STORE_RESIDUAL_OUT: tl.constexpr,
206
- HAS_BIAS: tl.constexpr,
207
- HAS_DROPOUT: tl.constexpr,
208
- STORE_DROPOUT_MASK: tl.constexpr,
209
- HAS_ROWSCALE: tl.constexpr,
210
- HAS_X1: tl.constexpr,
211
- HAS_W1: tl.constexpr,
212
- HAS_B1: tl.constexpr,
213
- ):
214
- # Map the program id to the row of X and Y it should compute.
215
- row = tl.program_id(0)
216
- X += row * stride_x_row
217
- Y += row * stride_y_row
218
- if HAS_RESIDUAL:
219
- RESIDUAL += row * stride_res_row
220
- if STORE_RESIDUAL_OUT:
221
- RESIDUAL_OUT += row * stride_res_out_row
222
- if HAS_X1:
223
- X1 += row * stride_x1_row
224
- if HAS_W1:
225
- Y1 += row * stride_y1_row
226
- # Compute mean and variance
227
- cols = tl.arange(0, BLOCK_N)
228
- x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
229
- if HAS_ROWSCALE:
230
- rowscale = tl.load(ROWSCALE + row).to(tl.float32)
231
- x *= rowscale
232
- if HAS_DROPOUT:
233
- # Compute dropout mask
234
- # 7 rounds is good enough, and reduces register pressure
235
- keep_mask = tl.rand(tl.load(SEEDS + row).to(tl.uint32), cols, n_rounds=7) > dropout_p
236
- x = tl.where(keep_mask, x / (1.0 - dropout_p), 0.0)
237
- if STORE_DROPOUT_MASK:
238
- tl.store(DROPOUT_MASK + row * N + cols, keep_mask, mask=cols < N)
239
- if HAS_X1:
240
- x1 = tl.load(X1 + cols, mask=cols < N, other=0.0).to(tl.float32)
241
- if HAS_ROWSCALE:
242
- rowscale = tl.load(ROWSCALE + M + row).to(tl.float32)
243
- x1 *= rowscale
244
- if HAS_DROPOUT:
245
- # Compute dropout mask
246
- # 7 rounds is good enough, and reduces register pressure
247
- keep_mask = (
248
- tl.rand(tl.load(SEEDS + M + row).to(tl.uint32), cols, n_rounds=7) > dropout_p
249
- )
250
- x1 = tl.where(keep_mask, x1 / (1.0 - dropout_p), 0.0)
251
- if STORE_DROPOUT_MASK:
252
- tl.store(DROPOUT_MASK1 + row * N + cols, keep_mask, mask=cols < N)
253
- x += x1
254
- if HAS_RESIDUAL:
255
- residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32)
256
- x += residual
257
- if STORE_RESIDUAL_OUT:
258
- tl.store(RESIDUAL_OUT + cols, x, mask=cols < N)
259
- if not IS_RMS_NORM:
260
- mean = tl.sum(x, axis=0) / N
261
- tl.store(Mean + row, mean)
262
- xbar = tl.where(cols < N, x - mean, 0.0)
263
- var = tl.sum(xbar * xbar, axis=0) / N
264
- else:
265
- xbar = tl.where(cols < N, x, 0.0)
266
- var = tl.sum(xbar * xbar, axis=0) / N
267
- rstd = 1 / tl.sqrt(var + eps)
268
- tl.store(Rstd + row, rstd)
269
- # Normalize and apply linear transformation
270
- mask = cols < N
271
- w = tl.load(W + cols, mask=mask).to(tl.float32)
272
- if zero_centered_weight:
273
- w += 1.0
274
- if HAS_BIAS:
275
- b = tl.load(B + cols, mask=mask).to(tl.float32)
276
- x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
277
- y = x_hat * w + b if HAS_BIAS else x_hat * w
278
- # Write output
279
- tl.store(Y + cols, y, mask=mask)
280
- if HAS_W1:
281
- w1 = tl.load(W1 + cols, mask=mask).to(tl.float32)
282
- if zero_centered_weight:
283
- w1 += 1.0
284
- if HAS_B1:
285
- b1 = tl.load(B1 + cols, mask=mask).to(tl.float32)
286
- y1 = x_hat * w1 + b1 if HAS_B1 else x_hat * w1
287
- tl.store(Y1 + cols, y1, mask=mask)
288
-
289
-
290
- def _layer_norm_fwd(
291
- x: Tensor,
292
- weight: Tensor,
293
- bias: Tensor,
294
- eps: float,
295
- residual: Optional[Tensor] = None,
296
- x1: Optional[Tensor] = None,
297
- weight1: Optional[Tensor] = None,
298
- bias1: Optional[Tensor] = None,
299
- dropout_p: float = 0.0,
300
- rowscale: Optional[Tensor] = None,
301
- out_dtype: Optional[torch.dtype] = None,
302
- residual_dtype: Optional[torch.dtype] = None,
303
- zero_centered_weight: bool = False,
304
- is_rms_norm: bool = False,
305
- return_dropout_mask: bool = False,
306
- out: Optional[Tensor] = None,
307
- residual_out: Optional[Tensor] = None
308
- ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor):
309
- # Need to wrap to handle the case where residual_out is an alias of x, which makes torch.library
310
- # and torch.compile unhappy. Also allocate memory for out and residual_out if they are None
311
- # so that _layer_norm_fwd_impl doesn't have to return them.
312
- if out is None:
313
- out = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype)
314
- if residual is not None:
315
- residual_dtype = residual.dtype
316
- if residual_out is None and (
317
- residual is not None
318
- or (residual_dtype is not None and residual_dtype != x.dtype)
319
- or dropout_p > 0.0
320
- or rowscale is not None
321
- or x1 is not None
322
- ):
323
- residual_out = torch.empty_like(
324
- x, dtype=residual_dtype if residual_dtype is not None else x.dtype
325
- )
326
- else:
327
- residual_out = None
328
- y1, mean, rstd, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd_impl(
329
- x,
330
- weight,
331
- bias,
332
- eps,
333
- out,
334
- residual=residual,
335
- x1=x1,
336
- weight1=weight1,
337
- bias1=bias1,
338
- dropout_p=dropout_p,
339
- rowscale=rowscale,
340
- zero_centered_weight=zero_centered_weight,
341
- is_rms_norm=is_rms_norm,
342
- return_dropout_mask=return_dropout_mask,
343
- residual_out=residual_out,
344
- )
345
- # residual_out is None if residual is None and residual_dtype == input_dtype and dropout_p == 0.0
346
- if residual_out is None:
347
- residual_out = x
348
- return out, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1
349
-
350
-
351
- # [2025-04-28] torch.library.triton_op ignores the schema argument, but here we need the schema
352
- # since we're returning a tuple of tensors
353
- @triton_op("flash_attn::layer_norm_fwd_impl", mutates_args={"out", "residual_out"},
354
- schema="(Tensor x, Tensor weight, Tensor bias, float eps, Tensor(a!) out, Tensor? residual, Tensor? x1, Tensor? weight1, Tensor? bias1, float dropout_p, Tensor? rowscale, bool zero_centered_weight, bool is_rms_norm, bool return_dropout_mask, Tensor(a!)? residual_out) -> (Tensor y1, Tensor mean, Tensor rstd, Tensor seeds, Tensor dropout_mask, Tensor dropout_mask1)")
355
- def _layer_norm_fwd_impl(
356
- x: Tensor,
357
- weight: Tensor,
358
- bias: Tensor,
359
- eps: float,
360
- out: Tensor,
361
- residual: Optional[Tensor] = None,
362
- x1: Optional[Tensor] = None,
363
- weight1: Optional[Tensor] = None,
364
- bias1: Optional[Tensor] = None,
365
- dropout_p: float = 0.0,
366
- rowscale: Optional[Tensor] = None,
367
- zero_centered_weight: bool = False,
368
- is_rms_norm: bool = False,
369
- return_dropout_mask: bool = False,
370
- residual_out: Optional[Tensor] = None
371
- ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor):
372
- M, N = x.shape
373
- assert x.stride(-1) == 1
374
- if residual is not None:
375
- assert residual.stride(-1) == 1
376
- assert residual.shape == (M, N)
377
- assert weight.shape == (N,)
378
- assert weight.stride(-1) == 1
379
- if bias is not None:
380
- assert bias.stride(-1) == 1
381
- assert bias.shape == (N,)
382
- if x1 is not None:
383
- assert x1.shape == x.shape
384
- assert rowscale is None
385
- assert x1.stride(-1) == 1
386
- if weight1 is not None:
387
- assert weight1.shape == (N,)
388
- assert weight1.stride(-1) == 1
389
- if bias1 is not None:
390
- assert bias1.shape == (N,)
391
- assert bias1.stride(-1) == 1
392
- if rowscale is not None:
393
- assert rowscale.is_contiguous()
394
- assert rowscale.shape == (M,)
395
- assert out.shape == x.shape
396
- assert out.stride(-1) == 1
397
- if residual_out is not None:
398
- assert residual_out.shape == x.shape
399
- assert residual_out.stride(-1) == 1
400
- if weight1 is not None:
401
- y1 = torch.empty_like(out)
402
- assert y1.stride(-1) == 1
403
- else:
404
- y1 = None
405
- mean = torch.empty((M,), dtype=torch.float32, device=x.device) if not is_rms_norm else None
406
- rstd = torch.empty((M,), dtype=torch.float32, device=x.device)
407
- if dropout_p > 0.0:
408
- seeds = torch.randint(
409
- 2**32, (M if x1 is None else 2 * M,), device=x.device, dtype=torch.int64
410
- )
411
- else:
412
- seeds = None
413
- if return_dropout_mask and dropout_p > 0.0:
414
- dropout_mask = torch.empty(M, N, device=x.device, dtype=torch.bool)
415
- if x1 is not None:
416
- dropout_mask1 = torch.empty(M, N, device=x.device, dtype=torch.bool)
417
- else:
418
- dropout_mask1 = None
419
- else:
420
- dropout_mask, dropout_mask1 = None, None
421
- # Less than 64KB per feature: enqueue fused kernel
422
- MAX_FUSED_SIZE = 65536 // x.element_size()
423
- BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
424
- if N > BLOCK_N:
425
- raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
426
- with torch.cuda.device(x.device.index):
427
- torch.library.wrap_triton(_layer_norm_fwd_1pass_kernel)[(M,)](
428
- x,
429
- out,
430
- weight,
431
- bias,
432
- residual,
433
- x1,
434
- weight1,
435
- bias1,
436
- y1,
437
- residual_out,
438
- rowscale,
439
- seeds,
440
- dropout_mask,
441
- dropout_mask1,
442
- mean,
443
- rstd,
444
- x.stride(0),
445
- out.stride(0),
446
- residual.stride(0) if residual is not None else 0,
447
- residual_out.stride(0) if residual_out is not None else 0,
448
- x1.stride(0) if x1 is not None else 0,
449
- y1.stride(0) if y1 is not None else 0,
450
- M,
451
- N,
452
- eps,
453
- dropout_p,
454
- # Passing a bool makes torch inductor very unhappy since it then tries to compare to int_max
455
- int(zero_centered_weight),
456
- is_rms_norm,
457
- BLOCK_N,
458
- residual is not None,
459
- residual_out is not None,
460
- bias is not None,
461
- dropout_p > 0.0,
462
- dropout_mask is not None,
463
- rowscale is not None,
464
- HAS_X1=x1 is not None,
465
- HAS_W1=weight1 is not None,
466
- HAS_B1=bias1 is not None,
467
- )
468
- return y1, mean, rstd, seeds, dropout_mask, dropout_mask1
469
-
470
-
471
- @triton.autotune(
472
- configs=triton_autotune_configs(),
473
- key=["N", "HAS_DRESIDUAL", "STORE_DRESIDUAL", "IS_RMS_NORM", "HAS_BIAS", "HAS_DROPOUT"],
474
- )
475
- # torch compile doesn't like triton.heuristics, so we set these manually when calling the kernel
476
- # @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
477
- # @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None})
478
- # @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None})
479
- # @triton.heuristics({"HAS_ROWSCALE": lambda args: args["ROWSCALE"] is not None})
480
- # @triton.heuristics({"HAS_DY1": lambda args: args["DY1"] is not None})
481
- # @triton.heuristics({"HAS_DX1": lambda args: args["DX1"] is not None})
482
- # @triton.heuristics({"HAS_B1": lambda args: args["DB1"] is not None})
483
- # @triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None})
484
- @triton.jit
485
- def _layer_norm_bwd_kernel(
486
- X, # pointer to the input
487
- W, # pointer to the weights
488
- B, # pointer to the biases
489
- Y, # pointer to the output to be recomputed
490
- DY, # pointer to the output gradient
491
- DX, # pointer to the input gradient
492
- DW, # pointer to the partial sum of weights gradient
493
- DB, # pointer to the partial sum of biases gradient
494
- DRESIDUAL,
495
- W1,
496
- DY1,
497
- DX1,
498
- DW1,
499
- DB1,
500
- DRESIDUAL_IN,
501
- ROWSCALE,
502
- SEEDS,
503
- Mean, # pointer to the mean
504
- Rstd, # pointer to the 1/std
505
- stride_x_row, # how much to increase the pointer when moving by 1 row
506
- stride_y_row,
507
- stride_dy_row,
508
- stride_dx_row,
509
- stride_dres_row,
510
- stride_dy1_row,
511
- stride_dx1_row,
512
- stride_dres_in_row,
513
- M, # number of rows in X
514
- N, # number of columns in X
515
- eps, # epsilon to avoid division by zero
516
- dropout_p,
517
- zero_centered_weight,
518
- rows_per_program,
519
- IS_RMS_NORM: tl.constexpr,
520
- BLOCK_N: tl.constexpr,
521
- HAS_DRESIDUAL: tl.constexpr,
522
- STORE_DRESIDUAL: tl.constexpr,
523
- HAS_BIAS: tl.constexpr,
524
- HAS_DROPOUT: tl.constexpr,
525
- HAS_ROWSCALE: tl.constexpr,
526
- HAS_DY1: tl.constexpr,
527
- HAS_DX1: tl.constexpr,
528
- HAS_B1: tl.constexpr,
529
- RECOMPUTE_OUTPUT: tl.constexpr,
530
- ):
531
- # Map the program id to the elements of X, DX, and DY it should compute.
532
- row_block_id = tl.program_id(0)
533
- row_start = row_block_id * rows_per_program
534
- # Do not early exit if row_start >= M, because we need to write DW and DB
535
- cols = tl.arange(0, BLOCK_N)
536
- mask = cols < N
537
- X += row_start * stride_x_row
538
- if HAS_DRESIDUAL:
539
- DRESIDUAL += row_start * stride_dres_row
540
- if STORE_DRESIDUAL:
541
- DRESIDUAL_IN += row_start * stride_dres_in_row
542
- DY += row_start * stride_dy_row
543
- DX += row_start * stride_dx_row
544
- if HAS_DY1:
545
- DY1 += row_start * stride_dy1_row
546
- if HAS_DX1:
547
- DX1 += row_start * stride_dx1_row
548
- if RECOMPUTE_OUTPUT:
549
- Y += row_start * stride_y_row
550
- w = tl.load(W + cols, mask=mask).to(tl.float32)
551
- if zero_centered_weight:
552
- w += 1.0
553
- if RECOMPUTE_OUTPUT and HAS_BIAS:
554
- b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32)
555
- if HAS_DY1:
556
- w1 = tl.load(W1 + cols, mask=mask).to(tl.float32)
557
- if zero_centered_weight:
558
- w1 += 1.0
559
- dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
560
- if HAS_BIAS:
561
- db = tl.zeros((BLOCK_N,), dtype=tl.float32)
562
- if HAS_DY1:
563
- dw1 = tl.zeros((BLOCK_N,), dtype=tl.float32)
564
- if HAS_B1:
565
- db1 = tl.zeros((BLOCK_N,), dtype=tl.float32)
566
- row_end = min((row_block_id + 1) * rows_per_program, M)
567
- for row in range(row_start, row_end):
568
- # Load data to SRAM
569
- x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
570
- dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
571
- if HAS_DY1:
572
- dy1 = tl.load(DY1 + cols, mask=mask, other=0).to(tl.float32)
573
- if not IS_RMS_NORM:
574
- mean = tl.load(Mean + row)
575
- rstd = tl.load(Rstd + row)
576
- # Compute dx
577
- xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
578
- xhat = tl.where(mask, xhat, 0.0)
579
- if RECOMPUTE_OUTPUT:
580
- y = xhat * w + b if HAS_BIAS else xhat * w
581
- tl.store(Y + cols, y, mask=mask)
582
- wdy = w * dy
583
- dw += dy * xhat
584
- if HAS_BIAS:
585
- db += dy
586
- if HAS_DY1:
587
- wdy += w1 * dy1
588
- dw1 += dy1 * xhat
589
- if HAS_B1:
590
- db1 += dy1
591
- if not IS_RMS_NORM:
592
- c1 = tl.sum(xhat * wdy, axis=0) / N
593
- c2 = tl.sum(wdy, axis=0) / N
594
- dx = (wdy - (xhat * c1 + c2)) * rstd
595
- else:
596
- c1 = tl.sum(xhat * wdy, axis=0) / N
597
- dx = (wdy - xhat * c1) * rstd
598
- if HAS_DRESIDUAL:
599
- dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32)
600
- dx += dres
601
- # Write dx
602
- if STORE_DRESIDUAL:
603
- tl.store(DRESIDUAL_IN + cols, dx, mask=mask)
604
- if HAS_DX1:
605
- if HAS_DROPOUT:
606
- keep_mask = (
607
- tl.rand(tl.load(SEEDS + M + row).to(tl.uint32), cols, n_rounds=7) > dropout_p
608
- )
609
- dx1 = tl.where(keep_mask, dx / (1.0 - dropout_p), 0.0)
610
- else:
611
- dx1 = dx
612
- tl.store(DX1 + cols, dx1, mask=mask)
613
- if HAS_DROPOUT:
614
- keep_mask = tl.rand(tl.load(SEEDS + row).to(tl.uint32), cols, n_rounds=7) > dropout_p
615
- dx = tl.where(keep_mask, dx / (1.0 - dropout_p), 0.0)
616
- if HAS_ROWSCALE:
617
- rowscale = tl.load(ROWSCALE + row).to(tl.float32)
618
- dx *= rowscale
619
- tl.store(DX + cols, dx, mask=mask)
620
-
621
- X += stride_x_row
622
- if HAS_DRESIDUAL:
623
- DRESIDUAL += stride_dres_row
624
- if STORE_DRESIDUAL:
625
- DRESIDUAL_IN += stride_dres_in_row
626
- if RECOMPUTE_OUTPUT:
627
- Y += stride_y_row
628
- DY += stride_dy_row
629
- DX += stride_dx_row
630
- if HAS_DY1:
631
- DY1 += stride_dy1_row
632
- if HAS_DX1:
633
- DX1 += stride_dx1_row
634
- tl.store(DW + row_block_id * N + cols, dw, mask=mask)
635
- if HAS_BIAS:
636
- tl.store(DB + row_block_id * N + cols, db, mask=mask)
637
- if HAS_DY1:
638
- tl.store(DW1 + row_block_id * N + cols, dw1, mask=mask)
639
- if HAS_B1:
640
- tl.store(DB1 + row_block_id * N + cols, db1, mask=mask)
641
-
642
-
643
- def _layer_norm_bwd(
644
- dy: Tensor,
645
- x: Tensor,
646
- weight: Tensor,
647
- bias: Tensor,
648
- eps: float,
649
- mean: Tensor,
650
- rstd: Tensor,
651
- dresidual: Optional[Tensor] = None,
652
- dy1: Optional[Tensor] = None,
653
- weight1: Optional[Tensor] = None,
654
- bias1: Optional[Tensor] = None,
655
- seeds: Optional[Tensor] = None,
656
- dropout_p: float = 0.0,
657
- rowscale: Optional[Tensor] = None,
658
- has_residual: bool = False,
659
- has_x1: bool = False,
660
- zero_centered_weight: bool = False,
661
- is_rms_norm: bool = False,
662
- x_dtype: Optional[torch.dtype] = None,
663
- recompute_output: bool = False,
664
- ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor):
665
- # Need to wrap to handle the case where dresidual_in or dx1 are aliases of x,
666
- # which makes torch.library unhappy
667
- dx, dw, db, dresidual_in, dx1, dw1, db1, y = _layer_norm_bwd_impl(
668
- dy,
669
- x,
670
- weight,
671
- bias,
672
- eps,
673
- mean,
674
- rstd,
675
- dresidual,
676
- dy1,
677
- weight1,
678
- bias1,
679
- seeds,
680
- dropout_p,
681
- rowscale,
682
- has_residual,
683
- has_x1,
684
- zero_centered_weight,
685
- is_rms_norm,
686
- x_dtype=x_dtype,
687
- recompute_output=recompute_output,
688
- )
689
- # Don't need to compute dresidual_in separately in this case
690
- if has_residual and dx.dtype == x.dtype and dropout_p == 0.0 and rowscale is None:
691
- dresidual_in = dx
692
- if has_x1 and dropout_p == 0.0:
693
- dx1 = dx
694
- return dx, dw, db, dresidual_in, dx1, dw1, db1, y
695
-
696
-
697
-
698
- @triton_op("flash_attn::layer_norm_bwd_impl", mutates_args={},
699
- schema="(Tensor dy, Tensor x, Tensor weight, Tensor bias, float eps, Tensor mean, Tensor rstd, Tensor? dresidual, Tensor? dy1, Tensor? weight1, Tensor? bias1, Tensor? seeds, float dropout_p, Tensor? rowscale, bool has_residual, bool has_x1, bool zero_centered_weight, bool is_rms_norm, ScalarType? x_dtype, bool recompute_output) -> (Tensor dx, Tensor dw, Tensor db, Tensor dresidual_in, Tensor dx1, Tensor dw1, Tensor db1, Tensor y)",
700
- allow_decomposition=False, # Don't let torch.compile trace inside
701
- )
702
- def _layer_norm_bwd_impl(
703
- dy: Tensor,
704
- x: Tensor,
705
- weight: Tensor,
706
- bias: Tensor,
707
- eps: float,
708
- mean: Tensor,
709
- rstd: Tensor,
710
- dresidual: Optional[Tensor] = None,
711
- dy1: Optional[Tensor] = None,
712
- weight1: Optional[Tensor] = None,
713
- bias1: Optional[Tensor] = None,
714
- seeds: Optional[Tensor] = None,
715
- dropout_p: float = 0.0,
716
- rowscale: Optional[Tensor] = None,
717
- has_residual: bool = False,
718
- has_x1: bool = False,
719
- zero_centered_weight: bool = False,
720
- is_rms_norm: bool = False,
721
- x_dtype: Optional[torch.dtype] = None,
722
- recompute_output: bool = False,
723
- ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor):
724
- M, N = x.shape
725
- assert x.stride(-1) == 1
726
- dy = maybe_contiguous_lastdim(dy)
727
- assert dy.stride(-1) == 1
728
- assert dy.shape == (M, N)
729
- if dresidual is not None:
730
- dresidual = maybe_contiguous_lastdim(dresidual)
731
- assert dresidual.stride(-1) == 1
732
- assert dresidual.shape == (M, N)
733
- assert weight.shape == (N,)
734
- assert weight.stride(-1) == 1
735
- if bias is not None:
736
- assert bias.stride(-1) == 1
737
- assert bias.shape == (N,)
738
- if dy1 is not None:
739
- dy1 = maybe_contiguous_lastdim(dy1)
740
- assert weight1 is not None
741
- assert dy1.shape == dy.shape
742
- assert dy1.stride(-1) == 1
743
- if weight1 is not None:
744
- assert weight1.shape == (N,)
745
- assert weight1.stride(-1) == 1
746
- if bias1 is not None:
747
- assert bias1.shape == (N,)
748
- assert bias1.stride(-1) == 1
749
- if seeds is not None:
750
- assert seeds.is_contiguous()
751
- assert seeds.shape == (M if not has_x1 else M * 2,)
752
- if rowscale is not None:
753
- assert rowscale.is_contiguous()
754
- assert rowscale.shape == (M,)
755
- # allocate output
756
- dx = (
757
- torch.empty_like(x)
758
- if x_dtype is None
759
- else torch.empty(M, N, dtype=x_dtype, device=x.device)
760
- )
761
- dresidual_in = (
762
- torch.empty_like(x)
763
- if has_residual
764
- and (dx.dtype != x.dtype or dropout_p > 0.0 or rowscale is not None or has_x1)
765
- else None
766
- )
767
- dx1 = torch.empty_like(dx) if (has_x1 and dropout_p > 0.0) else None
768
- y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None
769
- if recompute_output:
770
- assert weight1 is None, "recompute_output is not supported with parallel LayerNorm"
771
-
772
- # Less than 64KB per feature: enqueue fused kernel
773
- MAX_FUSED_SIZE = 65536 // x.element_size()
774
- BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
775
- if N > BLOCK_N:
776
- raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
777
- # Increasing the multiple (e.g. 8) will allow more thread blocks to be launched and hide the
778
- # latency of the gmem reads/writes, but will increase the time of summing up dw / db.
779
- sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count * 8
780
- _dw = torch.empty((sm_count, N), dtype=torch.float32, device=weight.device)
781
- _db = (
782
- torch.empty((sm_count, N), dtype=torch.float32, device=bias.device)
783
- if bias is not None
784
- else None
785
- )
786
- _dw1 = torch.empty_like(_dw) if weight1 is not None else None
787
- _db1 = torch.empty_like(_db) if bias1 is not None else None
788
- rows_per_program = math.ceil(M / sm_count)
789
- grid = (sm_count,)
790
- with torch.cuda.device(x.device.index):
791
- torch.library.wrap_triton(_layer_norm_bwd_kernel)[grid](
792
- x,
793
- weight,
794
- bias,
795
- y,
796
- dy,
797
- dx,
798
- _dw,
799
- _db,
800
- dresidual,
801
- weight1,
802
- dy1,
803
- dx1,
804
- _dw1,
805
- _db1,
806
- dresidual_in,
807
- rowscale,
808
- seeds,
809
- mean,
810
- rstd,
811
- x.stride(0),
812
- 0 if not recompute_output else y.stride(0),
813
- dy.stride(0),
814
- dx.stride(0),
815
- dresidual.stride(0) if dresidual is not None else 0,
816
- dy1.stride(0) if dy1 is not None else 0,
817
- dx1.stride(0) if dx1 is not None else 0,
818
- dresidual_in.stride(0) if dresidual_in is not None else 0,
819
- M,
820
- N,
821
- eps,
822
- dropout_p,
823
- # Passing bool make torch inductor very unhappy since it then tries to compare to int_max
824
- int(zero_centered_weight),
825
- rows_per_program,
826
- is_rms_norm,
827
- BLOCK_N,
828
- dresidual is not None,
829
- dresidual_in is not None,
830
- bias is not None,
831
- dropout_p > 0.0,
832
- HAS_ROWSCALE=rowscale is not None,
833
- HAS_DY1=dy1 is not None,
834
- HAS_DX1=dx1 is not None,
835
- HAS_B1=bias1 is not None,
836
- RECOMPUTE_OUTPUT=y is not None,
837
- )
838
- dw = _dw.sum(0).to(weight.dtype)
839
- db = _db.sum(0).to(bias.dtype) if bias is not None else None
840
- dw1 = _dw1.sum(0).to(weight1.dtype) if weight1 is not None else None
841
- db1 = _db1.sum(0).to(bias1.dtype) if bias1 is not None else None
842
- # dresidual_in and dx1 could be None, the wrapper will handle assigning them from dx
843
- return dx, dw, db, dresidual_in, dx1, dw1, db1, y
844
-
845
-
846
- class LayerNormFn(torch.autograd.Function):
847
-
848
- @staticmethod
849
- def forward(
850
- ctx,
851
- x,
852
- weight,
853
- bias,
854
- residual=None,
855
- x1=None,
856
- weight1=None,
857
- bias1=None,
858
- eps=1e-6,
859
- dropout_p=0.0,
860
- rowscale=None,
861
- prenorm=False,
862
- residual_in_fp32=False,
863
- zero_centered_weight=False,
864
- is_rms_norm=False,
865
- return_dropout_mask=False,
866
- out_dtype=None,
867
- out=None,
868
- residual_out=None
869
- ):
870
- x_shape_og = x.shape
871
- # reshape input data into 2D tensor
872
- x = maybe_contiguous_lastdim(x.reshape(-1, x.shape[-1]))
873
- if residual is not None:
874
- assert residual.shape == x_shape_og
875
- residual = maybe_contiguous_lastdim(residual.reshape(-1, residual.shape[-1]))
876
- if x1 is not None:
877
- assert x1.shape == x_shape_og
878
- assert rowscale is None, "rowscale is not supported with parallel LayerNorm"
879
- x1 = maybe_contiguous_lastdim(x1.reshape(-1, x1.shape[-1]))
880
- weight = weight.contiguous()
881
- bias = maybe_contiguous(bias)
882
- weight1 = maybe_contiguous(weight1)
883
- bias1 = maybe_contiguous(bias1)
884
- if rowscale is not None:
885
- rowscale = rowscale.reshape(-1).contiguous()
886
- residual_dtype = (
887
- residual.dtype
888
- if residual is not None
889
- else (torch.float32 if residual_in_fp32 else None)
890
- )
891
- if out is not None:
892
- out = out.reshape(-1, out.shape[-1])
893
- if residual_out is not None:
894
- residual_out = residual_out.reshape(-1, residual_out.shape[-1])
895
- y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
896
- x,
897
- weight,
898
- bias,
899
- eps,
900
- residual,
901
- x1,
902
- weight1,
903
- bias1,
904
- dropout_p=dropout_p,
905
- rowscale=rowscale,
906
- out_dtype=out_dtype,
907
- residual_dtype=residual_dtype,
908
- zero_centered_weight=zero_centered_weight,
909
- is_rms_norm=is_rms_norm,
910
- return_dropout_mask=return_dropout_mask,
911
- out=out,
912
- residual_out=residual_out,
913
- )
914
- ctx.save_for_backward(
915
- residual_out, weight, bias, weight1, bias1, rowscale, seeds, mean, rstd
916
- )
917
- ctx.x_shape_og = x_shape_og
918
- ctx.eps = eps
919
- ctx.dropout_p = dropout_p
920
- ctx.is_rms_norm = is_rms_norm
921
- ctx.has_residual = residual is not None
922
- ctx.has_x1 = x1 is not None
923
- ctx.prenorm = prenorm
924
- ctx.x_dtype = x.dtype
925
- ctx.zero_centered_weight = zero_centered_weight
926
- y = y.reshape(x_shape_og)
927
- y1 = y1.reshape(x_shape_og) if y1 is not None else None
928
- residual_out = residual_out.reshape(x_shape_og) if residual_out is not None else None
929
- dropout_mask = dropout_mask.reshape(x_shape_og) if dropout_mask is not None else None
930
- dropout_mask1 = dropout_mask1.reshape(x_shape_og) if dropout_mask1 is not None else None
931
- if not return_dropout_mask:
932
- if weight1 is None:
933
- return y if not prenorm else (y, residual_out)
934
- else:
935
- return (y, y1) if not prenorm else (y, y1, residual_out)
936
- else:
937
- if weight1 is None:
938
- return (
939
- (y, dropout_mask, dropout_mask1)
940
- if not prenorm
941
- else (y, residual_out, dropout_mask, dropout_mask1)
942
- )
943
- else:
944
- return (
945
- (y, y1, dropout_mask, dropout_mask1)
946
- if not prenorm
947
- else (y, y1, residual_out, dropout_mask, dropout_mask1)
948
- )
949
-
950
- @staticmethod
951
- def backward(ctx, dy, *args):
952
- x, weight, bias, weight1, bias1, rowscale, seeds, mean, rstd = ctx.saved_tensors
953
- dy = dy.reshape(-1, dy.shape[-1])
954
- if weight1 is not None:
955
- dy1, args = args[0], args[1:]
956
- dy1 = dy1.reshape(-1, dy1.shape[-1])
957
- assert dy1.shape == x.shape
958
- else:
959
- dy1 = None
960
- if ctx.prenorm:
961
- dresidual = args[0]
962
- dresidual = dresidual.reshape(-1, dresidual.shape[-1])
963
- assert dresidual.shape == x.shape
964
- else:
965
- dresidual = None
966
- dx, dw, db, dresidual_in, dx1, dw1, db1, _ = _layer_norm_bwd(
967
- dy,
968
- x,
969
- weight,
970
- bias,
971
- ctx.eps,
972
- mean,
973
- rstd,
974
- dresidual,
975
- dy1,
976
- weight1,
977
- bias1,
978
- seeds,
979
- ctx.dropout_p,
980
- rowscale,
981
- ctx.has_residual,
982
- ctx.has_x1,
983
- ctx.zero_centered_weight,
984
- ctx.is_rms_norm,
985
- x_dtype=ctx.x_dtype,
986
- recompute_output=False,
987
- )
988
- return (
989
- dx.reshape(ctx.x_shape_og),
990
- dw,
991
- db,
992
- dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
993
- dx1.reshape(ctx.x_shape_og) if dx1 is not None else None,
994
- dw1,
995
- db1,
996
- None,
997
- None,
998
- None,
999
- None,
1000
- None,
1001
- None,
1002
- None,
1003
- None,
1004
- None,
1005
- None,
1006
- None,
1007
- )
1008
-
1009
-
1010
- def layer_norm_fn(
1011
- x,
1012
- weight,
1013
- bias,
1014
- residual=None,
1015
- x1=None,
1016
- weight1=None,
1017
- bias1=None,
1018
- eps=1e-6,
1019
- dropout_p=0.0,
1020
- rowscale=None,
1021
- prenorm=False,
1022
- residual_in_fp32=False,
1023
- zero_centered_weight=False,
1024
- is_rms_norm=False,
1025
- return_dropout_mask=False,
1026
- out_dtype=None,
1027
- out=None,
1028
- residual_out=None
1029
- ):
1030
- return LayerNormFn.apply(
1031
- x,
1032
- weight,
1033
- bias,
1034
- residual,
1035
- x1,
1036
- weight1,
1037
- bias1,
1038
- eps,
1039
- dropout_p,
1040
- rowscale,
1041
- prenorm,
1042
- residual_in_fp32,
1043
- zero_centered_weight,
1044
- is_rms_norm,
1045
- return_dropout_mask,
1046
- out_dtype,
1047
- out,
1048
- residual_out
1049
- )
1050
-
1051
-
1052
- def rms_norm_fn(
1053
- x,
1054
- weight,
1055
- bias,
1056
- residual=None,
1057
- x1=None,
1058
- weight1=None,
1059
- bias1=None,
1060
- eps=1e-6,
1061
- dropout_p=0.0,
1062
- rowscale=None,
1063
- prenorm=False,
1064
- residual_in_fp32=False,
1065
- zero_centered_weight=False,
1066
- return_dropout_mask=False,
1067
- out_dtype=None,
1068
- out=None,
1069
- residual_out=None
1070
- ):
1071
- return LayerNormFn.apply(
1072
- x,
1073
- weight,
1074
- bias,
1075
- residual,
1076
- x1,
1077
- weight1,
1078
- bias1,
1079
- eps,
1080
- dropout_p,
1081
- rowscale,
1082
- prenorm,
1083
- residual_in_fp32,
1084
- zero_centered_weight,
1085
- True,
1086
- return_dropout_mask,
1087
- out_dtype,
1088
- out,
1089
- residual_out
1090
- )
1091
-
1092
-
1093
- class RMSNorm(torch.nn.Module):
1094
-
1095
- def __init__(self, hidden_size, eps=1e-5, dropout_p=0.0, zero_centered_weight=False,
1096
- device=None, dtype=None):
1097
- factory_kwargs = {"device": device, "dtype": dtype}
1098
- super().__init__()
1099
- self.eps = eps
1100
- if dropout_p > 0.0:
1101
- self.drop = torch.nn.Dropout(dropout_p)
1102
- else:
1103
- self.drop = None
1104
- self.zero_centered_weight = zero_centered_weight
1105
- self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
1106
- self.register_parameter("bias", None)
1107
- self.reset_parameters()
1108
-
1109
- def reset_parameters(self):
1110
- if not self.zero_centered_weight:
1111
- torch.nn.init.ones_(self.weight)
1112
- else:
1113
- torch.nn.init.zeros_(self.weight)
1114
-
1115
- def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
1116
- return rms_norm_fn(
1117
- x,
1118
- self.weight,
1119
- self.bias,
1120
- residual=residual,
1121
- eps=self.eps,
1122
- dropout_p=self.drop.p if self.drop is not None and self.training else 0.0,
1123
- prenorm=prenorm,
1124
- residual_in_fp32=residual_in_fp32,
1125
- zero_centered_weight=self.zero_centered_weight,
1126
- )
1127
-
1128
-
1129
- class LayerNormLinearFn(torch.autograd.Function):
1130
-
1131
- @staticmethod
1132
- @custom_fwd
1133
- def forward(
1134
- ctx,
1135
- x,
1136
- norm_weight,
1137
- norm_bias,
1138
- linear_weight,
1139
- linear_bias,
1140
- residual=None,
1141
- eps=1e-6,
1142
- prenorm=False,
1143
- residual_in_fp32=False,
1144
- is_rms_norm=False,
1145
- ):
1146
- x_shape_og = x.shape
1147
- # reshape input data into 2D tensor
1148
- x = maybe_contiguous_lastdim(x.reshape(-1, x.shape[-1]))
1149
- if residual is not None:
1150
- assert residual.shape == x_shape_og
1151
- residual = maybe_contiguous_lastdim(residual.reshape(-1, residual.shape[-1]))
1152
- norm_weight = norm_weight.contiguous()
1153
- norm_bias = maybe_contiguous(norm_bias)
1154
- residual_dtype = (
1155
- residual.dtype
1156
- if residual is not None
1157
- else (torch.float32 if residual_in_fp32 else None)
1158
- )
1159
- y, _, mean, rstd, residual_out, *rest = _layer_norm_fwd(
1160
- x,
1161
- norm_weight,
1162
- norm_bias,
1163
- eps,
1164
- residual,
1165
- out_dtype=None if not torch.is_autocast_enabled() else torch.get_autocast_dtype("cuda"),
1166
- residual_dtype=residual_dtype,
1167
- is_rms_norm=is_rms_norm,
1168
- )
1169
- y = y.reshape(x_shape_og)
1170
- dtype = torch.get_autocast_dtype("cuda") if torch.is_autocast_enabled() else y.dtype
1171
- linear_weight = linear_weight.to(dtype)
1172
- linear_bias = linear_bias.to(dtype) if linear_bias is not None else None
1173
- out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias)
1174
- # We don't store y, will be recomputed in the backward pass to save memory
1175
- ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd)
1176
- ctx.x_shape_og = x_shape_og
1177
- ctx.eps = eps
1178
- ctx.is_rms_norm = is_rms_norm
1179
- ctx.has_residual = residual is not None
1180
- ctx.prenorm = prenorm
1181
- ctx.x_dtype = x.dtype
1182
- ctx.linear_bias_is_none = linear_bias is None
1183
- return out if not prenorm else (out, residual_out.reshape(x_shape_og))
1184
-
1185
- @staticmethod
1186
- @custom_bwd
1187
- def backward(ctx, dout, *args):
1188
- x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors
1189
- dout = dout.reshape(-1, dout.shape[-1])
1190
- dy = F.linear(dout, linear_weight.t())
1191
- dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0)
1192
- dy = maybe_contiguous_lastdim(dy)
1193
- assert dy.shape == x.shape
1194
- if ctx.prenorm:
1195
- dresidual = args[0]
1196
- dresidual = maybe_contiguous_lastdim(dresidual.reshape(-1, dresidual.shape[-1]))
1197
- assert dresidual.shape == x.shape
1198
- else:
1199
- dresidual = None
1200
- dx, dnorm_weight, dnorm_bias, dresidual_in, _, _, _, y = _layer_norm_bwd(
1201
- dy,
1202
- x,
1203
- norm_weight,
1204
- norm_bias,
1205
- ctx.eps,
1206
- mean,
1207
- rstd,
1208
- dresidual=dresidual,
1209
- has_residual=ctx.has_residual,
1210
- is_rms_norm=ctx.is_rms_norm,
1211
- x_dtype=ctx.x_dtype,
1212
- recompute_output=True,
1213
- )
1214
- dlinear_weight = torch.einsum("bo,bi->oi", dout, y)
1215
- return (
1216
- dx.reshape(ctx.x_shape_og),
1217
- dnorm_weight,
1218
- dnorm_bias,
1219
- dlinear_weight,
1220
- dlinear_bias,
1221
- dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
1222
- None,
1223
- None,
1224
- None,
1225
- None,
1226
- )
1227
-
1228
-
1229
- def layer_norm_linear_fn(
1230
- x,
1231
- norm_weight,
1232
- norm_bias,
1233
- linear_weight,
1234
- linear_bias,
1235
- residual=None,
1236
- eps=1e-6,
1237
- prenorm=False,
1238
- residual_in_fp32=False,
1239
- is_rms_norm=False,
1240
- ):
1241
- return LayerNormLinearFn.apply(
1242
- x,
1243
- norm_weight,
1244
- norm_bias,
1245
- linear_weight,
1246
- linear_bias,
1247
- residual,
1248
- eps,
1249
- prenorm,
1250
- residual_in_fp32,
1251
- is_rms_norm,
1252
- )
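
For reference, the module deleted above exposed both a functional interface (layer_norm_fn / rms_norm_fn) and the RMSNorm wrapper. A minimal usage sketch, assuming a CUDA device, Triton, and fp16 inputs; the shapes below are illustrative, not anything fixed by this file.

import torch
from flash_attn.ops.triton.layer_norm import RMSNorm, rms_norm_fn

hidden = 1024
x = torch.randn(2, 128, hidden, device="cuda", dtype=torch.float16)
residual = torch.randn_like(x)

# Module interface: fused RMSNorm with an optional residual add.
# prenorm=True also returns the updated residual stream.
norm = RMSNorm(hidden, eps=1e-5).to(device="cuda", dtype=torch.float16)
y, new_residual = norm(x, residual=residual, prenorm=True, residual_in_fp32=True)

# Functional interface: the same Triton kernel without the nn.Module wrapper.
weight = torch.ones(hidden, device="cuda", dtype=torch.float16)
y2 = rms_norm_fn(x, weight, bias=None, eps=1e-5)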
 
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/linear.py DELETED
@@ -1,594 +0,0 @@
1
- # Adapted from https://github.com/ELS-RD/kernl/blob/main/src/kernl/implementations/linear_layer.py
2
- # and https://github.com/openai/triton/blob/master/python/triton/ops/matmul.py
3
- from typing import Optional
4
-
5
- import torch
6
- import triton
7
- import triton.language as tl
8
- from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time
9
-
10
- from flash_attn.ops.triton.k_activations import (
11
- gelu,
12
- gelu_approx,
13
- gelu_approx_grad,
14
- gelu_grad,
15
- squared_relu,
16
- squared_relu_grad,
17
- )
18
-
19
- # CREDITS: Initially inspired by the Triton tutorial on matrix multiplications
20
-
21
-
22
- def init_to_zero(name):
23
- return lambda nargs: nargs[name].zero_()
24
-
25
-
26
- def get_configs_io_bound():
27
- configs = []
28
- for num_stages in [2, 3, 4, 5, 6]:
29
- for block_m in [16, 32]:
30
- for block_k in [32, 64]:
31
- for block_n in [32, 64, 128, 256]:
32
- num_warps = 2 if block_n <= 64 else 4
33
- configs.append(
34
- triton.Config(
35
- {
36
- "BLOCK_M": block_m,
37
- "BLOCK_N": block_n,
38
- "BLOCK_K": block_k,
39
- "SPLIT_K": 1,
40
- },
41
- num_stages=num_stages,
42
- num_warps=num_warps,
43
- )
44
- )
45
- # split_k not used
46
- # for split_k in [2, 4, 8, 16]:
47
- # configs.append(triton.Config(
48
- # {'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
49
- # num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
50
- return configs
51
-
52
-
53
- @triton.autotune(
54
- configs=[
55
- triton.Config(
56
- {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8
57
- ),
58
- triton.Config(
59
- {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8
60
- ),
61
- triton.Config(
62
- {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
63
- ),
64
- triton.Config(
65
- {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
66
- ),
67
- triton.Config(
68
- {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
69
- ),
70
- triton.Config(
71
- {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
72
- ),
73
- triton.Config(
74
- {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
75
- ),
76
- triton.Config(
77
- {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
78
- ),
79
- triton.Config(
80
- {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2
81
- ),
82
- # good for int8
83
- triton.Config(
84
- {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1},
85
- num_stages=3,
86
- num_warps=8,
87
- ),
88
- triton.Config(
89
- {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1},
90
- num_stages=3,
91
- num_warps=8,
92
- ),
93
- triton.Config(
94
- {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4
95
- ),
96
- triton.Config(
97
- {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4
98
- ),
99
- triton.Config(
100
- {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1},
101
- num_stages=4,
102
- num_warps=4,
103
- ),
104
- triton.Config(
105
- {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
106
- ),
107
- triton.Config(
108
- {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
109
- ),
110
- triton.Config(
111
- {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
112
- ),
113
- triton.Config(
114
- {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2
115
- ),
116
- ]
117
- + get_configs_io_bound(),
118
- key=["CACHE_KEY_M", "CACHE_KEY_N", "CACHE_KEY_K"],
119
- prune_configs_by={
120
- "early_config_prune": early_config_prune,
121
- "perf_model": estimate_matmul_time,
122
- "top_k": 10,
123
- },
124
- )
125
- @triton.heuristics(
126
- {
127
- "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
128
- }
129
- )
130
- @triton.jit
131
- def kernel_fwd(
132
- C, # Pointers to matrices
133
- ACT_INPUT,
134
- A,
135
- B,
136
- bias,
137
- # Matrix dimensions
138
- M,
139
- N,
140
- K,
141
- CACHE_KEY_M,
142
- CACHE_KEY_N,
143
- CACHE_KEY_K,
144
- # The stride variables represent how much to increase the ptr by when moving by 1
145
- # element in a particular dimension. E.g. stride_am is how much to increase a_ptr
146
- # by to get the element one row down (A has M rows)
147
- stride_cm,
148
- # stride_cn, # Assume that stride_cn == 1
149
- stride_am,
150
- stride_ak,
151
- stride_bn,
152
- stride_bk,
153
- # Meta-parameters
154
- BLOCK_M: tl.constexpr,
155
- GROUP_M: tl.constexpr,
156
- BLOCK_N: tl.constexpr,
157
- BLOCK_K: tl.constexpr,
158
- # split k not used, not performant with activation, kept because early_config_prune is expecting it
159
- SPLIT_K: tl.constexpr,
160
- EVEN_K: tl.constexpr,
161
- A_ROWMAJOR: tl.constexpr,
162
- B_COLMAJOR: tl.constexpr,
163
- BIAS: tl.constexpr,
164
- SAVE_ACT_INPUT: tl.constexpr,
165
- ACTIVATION: tl.constexpr,
166
- ):
167
-
168
- """
169
- Kernel for computing Out = activation(A x W + C)
170
- - Input has shape (M, K)
171
- - Weight has shape (K, N)
172
- - Bias has shape (N,)
173
- - Output has shape (M, N)
174
- - ActInputs (optional) has shape (M, N)
175
- 'ActInputs' optionally saves the A x W + C intermediate for backward computations
176
- This kernel will consolidate over K
177
- """
178
-
179
- pid = tl.program_id(axis=0)
180
-
181
- grid_m = (M + BLOCK_M - 1) // BLOCK_M
182
- grid_n = (N + BLOCK_N - 1) // BLOCK_N
183
- # re-order program ID for better L2 performance
184
- width = GROUP_M * grid_n
185
- group_id = pid // width
186
- group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
187
- pid_m = group_id * GROUP_M + (pid % group_size)
188
- pid_n = (pid % width) // (group_size)
189
-
190
- # now compute the block that each program will go through
191
- # rm (resp. rn) denotes a range of indices
192
- # for rows (resp. col) of C
193
- rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
194
- rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
195
- # trick to avoid masking on M and N axis
196
- ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
197
- rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
198
- rk = tl.arange(0, BLOCK_K)
199
-
200
- if A_ROWMAJOR:
201
- A = A + (ram[:, None] * stride_am + rk[None, :])
202
- else:
203
- A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
204
- if B_COLMAJOR:
205
- B = B + (rk[:, None] + rbn[None, :] * stride_bn)
206
- else:
207
- B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
208
-
209
- acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
210
-
211
- for k in range(K, 0, -BLOCK_K):
212
- if EVEN_K:
213
- a = tl.load(A)
214
- b = tl.load(B)
215
- else:
216
- a = tl.load(A, mask=rk[None, :] < k, other=0.0)
217
- b = tl.load(B, mask=rk[:, None] < k, other=0.0)
218
- acc += tl.dot(a, b)
219
-
220
- if A_ROWMAJOR:
221
- A += BLOCK_K
222
- else:
223
- A += BLOCK_K * stride_ak
224
- if B_COLMAJOR:
225
- B += BLOCK_K
226
- else:
227
- B += BLOCK_K * stride_bk
228
-
229
- # Putting bias after the matmul (instead of before) is faster, idk why
230
- if BIAS:
231
- bias = tl.load(bias + rn, mask=rn < N, other=0.0).to(tl.float32)
232
- acc += bias[None, :]
233
-
234
- # optional: save the activation inputs
235
- if SAVE_ACT_INPUT:
236
- # act_in_ptrs = ACT_INPUT + ram[:, None] * stride_cm + rbn[None, :] * stride_cn
237
- act_in_ptrs = ACT_INPUT + ram[:, None] * stride_cm + rbn[None, :]
238
- tl.store(act_in_ptrs, acc)
239
-
240
- # optional: fused activation (while the data is in shared memory)
241
- if ACTIVATION == "gelu":
242
- acc = gelu(acc)
243
- elif ACTIVATION == "gelu_approx":
244
- acc = gelu_approx(acc)
245
- elif ACTIVATION == "squared_relu":
246
- acc = squared_relu(acc)
247
- # rematerialize rm and rn to save registers
248
- rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
249
- rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
250
-
251
- # write back result
252
- # C = C + rm[:, None] * stride_cm + rn[None, :] * stride_cn
253
- C = C + rm[:, None] * stride_cm + rn[None, :]
254
- mask = (rm < M)[:, None] & (rn < N)[None, :]
255
- tl.store(C, acc)
256
-
257
-
258
- def triton_linear_act(
259
- x: torch.Tensor,
260
- weight: torch.Tensor,
261
- bias: Optional[torch.Tensor] = None,
262
- activation: str = "id",
263
- save_act_input: bool = False,
264
- ) -> torch.Tensor:
265
- """
266
- Compute e = activation(x @ weight.T + bias).
267
- This wrapper kicks the `kernel_fwd` Triton kernel
268
- :param x: input tensor
269
- :param weight: weight matrix
270
- :param bias: an optional bias tensor
271
- :param activation: Activation name. Needs to be a Triton kernel.
272
- :param act_input: an optional tensor to save the activation inputs (for backward)
273
- :return: result tensor
274
- """
275
- # if torch.is_autocast_enabled():
276
- # dtype = torch.get_autocast_gpu_dtype()
277
- # x, weight, bias = [a.to(dtype=dtype) for a in [x, weight, bias]]
278
-
279
- assert activation in ["id", "gelu", "gelu_approx", "squared_relu"]
280
-
281
- batch_shape, n = x.shape[:-1], x.shape[-1]
282
- batch_dim = batch_shape.numel()
283
- x_reshaped = x.reshape(batch_dim, n)
284
-
285
- if x_reshaped.stride(0) > 1 and x_reshaped.stride(1) > 1:
286
- x_reshaped = x_reshaped.contiguous()
287
- if weight.stride(0) > 1 and weight.stride(1) > 1:
288
- weight = weight.contiguous()
289
- bias = bias.contiguous() if bias is not None else None
290
-
291
- assert (
292
- x.dtype == weight.dtype
293
- ), f"Input and weight must have the same dtype, got {x.dtype} and {weight.dtype}"
294
- if bias is not None:
295
- assert (
296
- x.dtype == bias.dtype
297
- ), f"Input and bias must have the same dtype, got {x.dtype} and {bias.dtype}"
298
- assert (
299
- x_reshaped.shape[1] == weight.shape[1]
300
- ), f"Incompatible dimensions: {x_reshaped.shape} - {weight.shape}"
301
-
302
- assert (
303
- bias is None or bias.shape[0] == weight.shape[0]
304
- ), "Incompatible dimensions in between weight and bias"
305
-
306
- M, K = x_reshaped.shape
307
- N, K = weight.shape
308
-
309
- output = torch.empty((M, N), device=x.device, dtype=x.dtype)
310
- act_input = torch.empty_like(output) if save_act_input else None
311
-
312
- # 1D launch kernel where each block gets its own program.
313
- grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) # noqa
314
-
315
- kernel_fwd[grid](
316
- output,
317
- act_input,
318
- x_reshaped,
319
- weight, # data ptrs
320
- bias if bias is not None else x, # auto skip bias if not present
321
- M, # shapes
322
- N,
323
- K,
324
- M // 32, # key for triton cache (limit number of compilations)
325
- N // 32,
326
- K // 32,
327
- stride_cm=output.stride(0), # strides
328
- # stride_cn=output.stride(1),
329
- stride_am=x_reshaped.stride(0),
330
- stride_ak=x_reshaped.stride(1),
331
- stride_bk=weight.stride(1),
332
- stride_bn=weight.stride(0),
333
- BIAS=bias is not None, # optional fused bias
334
- SAVE_ACT_INPUT=save_act_input, # optional save activation inputs
335
- ACTIVATION=activation, # optional fused activation
336
- A_ROWMAJOR=x_reshaped.stride(1) == 1,
337
- B_COLMAJOR=weight.stride(1) == 1,
338
- GROUP_M=8, # speed optimization: group the programs
339
- )
340
-
341
- if not save_act_input:
342
- return output.reshape(*batch_shape, output.shape[-1])
343
- else:
344
- return (
345
- output.reshape(*batch_shape, output.shape[-1]),
346
- act_input.reshape(*batch_shape, act_input.shape[-1]),
347
- )
348
-
349
-
350
- @triton.autotune(
351
- configs=[
352
- triton.Config(
353
- {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8
354
- ),
355
- triton.Config(
356
- {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8
357
- ),
358
- triton.Config(
359
- {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
360
- ),
361
- triton.Config(
362
- {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
363
- ),
364
- triton.Config(
365
- {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
366
- ),
367
- triton.Config(
368
- {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
369
- ),
370
- triton.Config(
371
- {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
372
- ),
373
- triton.Config(
374
- {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
375
- ),
376
- triton.Config(
377
- {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2
378
- ),
379
- # good for int8
380
- triton.Config(
381
- {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1},
382
- num_stages=3,
383
- num_warps=8,
384
- ),
385
- triton.Config(
386
- {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1},
387
- num_stages=3,
388
- num_warps=8,
389
- ),
390
- triton.Config(
391
- {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4
392
- ),
393
- triton.Config(
394
- {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4
395
- ),
396
- triton.Config(
397
- {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1},
398
- num_stages=4,
399
- num_warps=4,
400
- ),
401
- triton.Config(
402
- {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
403
- ),
404
- triton.Config(
405
- {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
406
- ),
407
- triton.Config(
408
- {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
409
- ),
410
- triton.Config(
411
- {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2
412
- ),
413
- ]
414
- + get_configs_io_bound(),
415
- key=["CACHE_KEY_M", "CACHE_KEY_N", "CACHE_KEY_K"],
416
- prune_configs_by={
417
- "early_config_prune": early_config_prune,
418
- "perf_model": estimate_matmul_time,
419
- "top_k": 10,
420
- },
421
- )
422
- @triton.heuristics(
423
- {
424
- "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
425
- }
426
- )
427
- @triton.jit
428
- def kernel_bwd(
429
- C, # Pointers to matrices
430
- ACT_INPUT,
431
- A,
432
- B,
433
- # Matrix dimensions
434
- M,
435
- N,
436
- K,
437
- CACHE_KEY_M,
438
- CACHE_KEY_N,
439
- CACHE_KEY_K,
440
- # The stride variables represent how much to increase the ptr by when moving by 1
441
- # element in a particular dimension. E.g. stride_am is how much to increase a_ptr
442
- # by to get the element one row down (A has M rows)
443
- stride_cm,
444
- # stride_cn, # Assume that stride_cn == 1
445
- stride_am,
446
- stride_ak,
447
- stride_bk,
448
- stride_bn,
449
- # Meta-parameters
450
- BLOCK_M: tl.constexpr,
451
- GROUP_M: tl.constexpr,
452
- BLOCK_N: tl.constexpr,
453
- BLOCK_K: tl.constexpr,
454
- # split k not used, not performant with activation, kept because early_config_prune is expecting it
455
- SPLIT_K: tl.constexpr,
456
- EVEN_K: tl.constexpr,
457
- ACTIVATION: tl.constexpr,
458
- ):
459
-
460
- """
461
- Kernel for computing Out = activation(A x W + C)
462
- - Input has shape (M, K)
463
- - Weight has shape (K, N)
464
- - Output has shape (M, N)
465
- - ActInputs (optional) has shape (M, N)
466
- 'ActInputs' optionally saves the A x W + C intermediate for backward computations
467
- This kernel will consolidate over K
468
- """
469
-
470
- pid = tl.program_id(axis=0)
471
-
472
- grid_m = (M + BLOCK_M - 1) // BLOCK_M
473
- grid_n = (N + BLOCK_N - 1) // BLOCK_N
474
- # re-order program ID for better L2 performance
475
- width = GROUP_M * grid_n
476
- group_id = pid // width
477
- group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
478
- pid_m = group_id * GROUP_M + (pid % group_size)
479
- pid_n = (pid % width) // (group_size)
480
-
481
- # now compute the block that each program will go through
482
- # rm (resp. rn) denotes a range of indices
483
- # for rows (resp. col) of C
484
- rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
485
- rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
486
- # trick to avoid masking on M and N axis
487
- ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
488
- rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
489
- rk = tl.arange(0, BLOCK_K)
490
-
491
- A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
492
- B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
493
-
494
- acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
495
-
496
- for k in range(K, 0, -BLOCK_K):
497
- if EVEN_K:
498
- a = tl.load(A)
499
- b = tl.load(B)
500
- else:
501
- a = tl.load(A, mask=rk[None, :] < k, other=0.0)
502
- b = tl.load(B, mask=rk[:, None] < k, other=0.0)
503
- acc += tl.dot(a, b)
504
-
505
- A += BLOCK_K * stride_ak
506
- B += BLOCK_K * stride_bk
507
-
508
- # optional: fused activation (while the data is in shared memory)
509
- if ACTIVATION != "id":
510
- act_in_ptrs = ACT_INPUT + ram[:, None] * stride_cm + rbn[None, :]
511
- act_input = tl.load(act_in_ptrs).to(acc.dtype)
512
- if ACTIVATION == "gelu":
513
- acc *= gelu_grad(act_input)
514
- elif ACTIVATION == "gelu_approx":
515
- acc *= gelu_approx_grad(act_input)
516
- elif ACTIVATION == "squared_relu":
517
- acc *= squared_relu_grad(act_input)
518
-
519
- # rematerialize rm and rn to save registers
520
- rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
521
- rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
522
-
523
- # write back result
524
- C = C + rm[:, None] * stride_cm + rn[None, :]
525
- mask = (rm < M)[:, None] & (rn < N)[None, :]
526
- tl.store(C, acc, mask=mask)
527
-
528
-
529
- def triton_dgrad_act(
530
- grad_output: torch.Tensor,
531
- weight: torch.Tensor,
532
- activation: str = "id",
533
- act_input: Optional[torch.Tensor] = None,
534
- ) -> torch.Tensor:
535
- """
536
- Compute e = activation(grad_output @ weight + bias).
537
- This wrapper kicks the `kernel_fwd` Triton kernel
538
- :param grad_output: input tensor
539
- :param weight: weight matrix
540
- :param activation: Activation name. Needs to be a Triton kernel.
541
- :param act_input: an optional tensor to save the activation inputs (for backward)
542
- :return: result tensor
543
- """
544
- assert activation in ["id", "gelu", "gelu_approx", "squared_relu"]
545
-
546
- batch_shape, n = grad_output.shape[:-1], grad_output.shape[-1]
547
- batch_dim = batch_shape.numel()
548
- grad_output_reshaped = grad_output.reshape(batch_dim, n)
549
-
550
- if grad_output_reshaped.stride(0) > 1 and grad_output_reshaped.stride(1) > 1:
551
- grad_output_reshaped = grad_output_reshaped.contiguous()
552
- if weight.stride(0) > 1 and weight.stride(1) > 1:
553
- weight = weight.contiguous()
554
-
555
- assert (
556
- grad_output.dtype == weight.dtype
557
- ), f"grad_output and weight must have the same dtype, got {grad_output.dtype} and {weight.dtype}"
558
- assert (
559
- grad_output_reshaped.shape[1] == weight.shape[0]
560
- ), f"Incompatible dimensions: {grad_output_reshaped.shape} - {weight.shape}"
561
- if activation != "id":
562
- assert act_input is not None, f"act_input is required for activation {activation}"
563
-
564
- # M, N, K in bwd are different from M, N, K in fwd
565
- M, K = grad_output_reshaped.shape
566
- K, N = weight.shape
567
-
568
- grad_input = torch.empty((M, N), device=grad_output.device, dtype=grad_output.dtype)
569
-
570
- # 1D launch kernel where each block gets its own program.
571
- grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) # noqa
572
-
573
- kernel_bwd[grid](
574
- grad_input,
575
- act_input,
576
- grad_output_reshaped,
577
- weight, # data ptrs
578
- M, # shapes
579
- N,
580
- K,
581
- M // 32, # key for triton cache (limit number of compilations)
582
- N // 32,
583
- K // 32,
584
- stride_cm=grad_input.stride(0), # strides
585
- # stride_cn=grad_input.stride(1),
586
- stride_am=grad_output_reshaped.stride(0),
587
- stride_ak=grad_output_reshaped.stride(1),
588
- stride_bk=weight.stride(0),
589
- stride_bn=weight.stride(1),
590
- ACTIVATION=activation, # optional fused activation
591
- GROUP_M=8, # speed optimization: group the programs
592
- )
593
-
594
- return grad_input.reshape(*batch_shape, grad_input.shape[-1])
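
A short sketch of calling the triton_linear_act wrapper deleted above, which fuses the matmul, bias, and activation and can optionally return the pre-activation that triton_dgrad_act later consumes. fp16 CUDA tensors are assumed and the sizes are arbitrary.

import torch
from flash_attn.ops.triton.linear import triton_linear_act

x = torch.randn(8, 512, 1024, device="cuda", dtype=torch.float16)
weight = torch.randn(4096, 1024, device="cuda", dtype=torch.float16)  # (out_features, in_features)
bias = torch.randn(4096, device="cuda", dtype=torch.float16)

# Fused out = squared_relu(x @ weight.T + bias); save_act_input=True also
# returns the pre-activation for use in the backward pass.
out, act_input = triton_linear_act(
    x, weight, bias, activation="squared_relu", save_act_input=True
)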
 
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/mlp.py DELETED
@@ -1,149 +0,0 @@
1
- # The triton fused matmul + sqrelu is faster for fp16 but slower for bf16, compared
2
- # to naive implementation.
3
- import fused_dense_lib as fused_dense_cuda
4
- import torch
5
- import torch.nn as nn
6
- import torch.nn.functional as F
7
-
8
- from flash_attn.utils.torch import custom_fwd, custom_bwd
9
- from flash_attn.ops.activations import sqrelu_bwd, sqrelu_fwd
10
- from flash_attn.ops.triton.linear import triton_dgrad_act, triton_linear_act
11
-
12
-
13
- class FusedDenseSqreluDenseFunc(torch.autograd.Function):
14
- @staticmethod
15
- @custom_fwd
16
- def forward(ctx, x, weight1, bias1, weight2, bias2, checkpoint_lvl=0):
17
- """checkpoint_lvl:
18
- 0: no recomputation in the bwd
19
- 1: recompute gelu_out in the bwd
20
- 2: recompute act_input and gelu_out in the bwd
21
- """
22
- if torch.is_autocast_enabled():
23
- dtype = torch.get_autocast_gpu_dtype()
24
- x, weight1, bias1, weight2, bias2 = [
25
- a.to(dtype=dtype) for a in [x, weight1, bias1, weight2, bias2]
26
- ]
27
- is_bf16 = x.dtype == torch.bfloat16
28
- assert checkpoint_lvl in [0, 1, 2]
29
- x = x.contiguous()
30
- weight1 = weight1.contiguous()
31
- bias1 = bias1.contiguous()
32
- weight2 = weight2.contiguous()
33
- bias2 = bias2.contiguous()
34
- batch_shape, n = x.shape[:-1], x.shape[-1]
35
- batch_dim = batch_shape.numel()
36
- if is_bf16:
37
- act_input = fused_dense_cuda.linear_bias_forward(
38
- x.reshape(batch_dim, n), weight1, bias1
39
- )
40
- output1 = sqrelu_fwd(act_input)
41
- else:
42
- save_act_input = checkpoint_lvl != 2
43
- result = triton_linear_act(
44
- x.reshape(batch_dim, n),
45
- weight1,
46
- bias1,
47
- activation="squared_relu",
48
- save_act_input=save_act_input,
49
- )
50
- if save_act_input:
51
- output1, act_input = result
52
- else:
53
- output1 = result
54
- output2 = fused_dense_cuda.linear_bias_forward(output1, weight2, bias2)
55
- ctx.checkpoint_lvl = checkpoint_lvl
56
- if checkpoint_lvl == 0:
57
- ctx.save_for_backward(x, weight1, bias1, weight2, act_input, output1)
58
- elif checkpoint_lvl == 1:
59
- ctx.save_for_backward(x, weight1, bias1, weight2, act_input)
60
- elif checkpoint_lvl == 2:
61
- ctx.save_for_backward(x, weight1, bias1, weight2)
62
- return output2.reshape(*batch_shape, output2.shape[-1])
63
-
64
- @staticmethod
65
- @custom_bwd
66
- def backward(ctx, grad_output):
67
- grad_output = grad_output.contiguous()
68
- checkpoint_lvl = ctx.checkpoint_lvl
69
- x, weight1, bias1, weight2, *rest = ctx.saved_tensors
70
- batch_shape, n = x.shape[:-1], x.shape[-1]
71
- batch_dim = batch_shape.numel()
72
- is_bf16 = x.dtype == torch.bfloat16
73
- if checkpoint_lvl == 0:
74
- act_input, output1 = rest
75
- elif checkpoint_lvl == 1:
76
- (act_input,) = rest
77
- output1 = sqrelu_fwd(act_input)
78
- elif checkpoint_lvl == 2:
79
- if is_bf16:
80
- act_input = fused_dense_cuda.linear_bias_forward(
81
- x.reshape(batch_dim, n), weight1, bias1
82
- )
83
- output1 = sqrelu_fwd(act_input)
84
- else:
85
- output1, act_input = triton_linear_act(
86
- x.reshape(batch_dim, n),
87
- weight1,
88
- bias1,
89
- activation="squared_relu",
90
- save_act_input=True,
91
- )
92
-
93
- if is_bf16:
94
- grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
95
- grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(output1, grad_output)
96
- grad_output1 = grad_output @ weight2
97
- grad_act_input = sqrelu_bwd(grad_output1, act_input)
98
- grad_input, grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_backward(
99
- x.reshape(batch_dim, n), weight1, grad_act_input
100
- )
101
- else:
102
- grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
103
- grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(output1, grad_output)
104
- grad_act_input = triton_dgrad_act(
105
- grad_output, weight2, activation="squared_relu", act_input=act_input
106
- )
107
- grad_input, grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_backward(
108
- x.reshape(batch_dim, n), weight1, grad_act_input
109
- )
110
- return grad_input.reshape_as(x), grad_weight1, grad_bias1, grad_weight2, grad_bias2, None
111
-
112
-
113
- fused_dense_sqrelu_dense_function = FusedDenseSqreluDenseFunc.apply
114
-
115
-
116
- class FusedDenseSqreluDense(nn.Module):
117
- def __init__(
118
- self,
119
- in_features,
120
- hidden_features=None,
121
- out_features=None,
122
- bias1=True,
123
- bias2=True,
124
- checkpoint_lvl=0,
125
- device=None,
126
- dtype=None,
127
- ):
128
- """
129
- checkpoint_lvl (increasing lvl means slower but more memory saving):
130
- 0: no recomputation in the bwd
131
- 1: recompute gelu_out in the bwd
132
- 2: recompute gelu_in and gelu_out in the bwd
133
- """
134
- assert checkpoint_lvl in [0, 1, 2]
135
- factory_kwargs = {"device": device, "dtype": dtype}
136
- super().__init__()
137
- out_features = out_features or in_features
138
- hidden_features = hidden_features or in_features * 4
139
- assert bias1 == True, "DenseSqreluDense module without bias is currently not supported"
140
- assert bias2 == True, "DenseSqreluDense module without bias is currently not supported"
141
- self.checkpoint_lvl = checkpoint_lvl
142
- self.fc1 = nn.Linear(in_features, hidden_features, bias=bias1, **factory_kwargs)
143
- self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
144
-
145
- def forward(self, x):
146
- assert x.is_cuda
147
- return fused_dense_sqrelu_dense_function(
148
- x, self.fc1.weight, self.fc1.bias, self.fc2.weight, self.fc2.bias, self.checkpoint_lvl
149
- )
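
As a usage note, the FusedDenseSqreluDense module deleted above chains the Triton linear kernels with the fused_dense CUDA extension. A minimal sketch, assuming CUDA, fp16, and that the fused_dense_lib extension is importable; the layer sizes are arbitrary.

import torch
from flash_attn.ops.triton.mlp import FusedDenseSqreluDense

mlp = FusedDenseSqreluDense(
    in_features=1024, hidden_features=4096, checkpoint_lvl=1,
    device="cuda", dtype=torch.float16,
)
x = torch.randn(4, 256, 1024, device="cuda", dtype=torch.float16, requires_grad=True)
out = mlp(x)          # fused fc1 -> squared ReLU -> fc2
out.sum().backward()  # backward recomputes activations according to checkpoint_lvl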
 
build/torch26-cxx98-cu118-x86_64-linux/flash_attn/ops/triton/rotary.py DELETED
@@ -1,185 +0,0 @@
1
- # Copyright (c) 2025, Tri Dao.
2
- # As of 2025-04-23, we require triton >= 3.0
3
-
4
- from typing import Optional, Union
5
-
6
- import torch
7
-
8
- import triton
9
- import triton.language as tl
10
-
11
-
12
- @triton.jit
13
- def rotary_kernel(
14
- OUT, # Pointers to matrices
15
- X,
16
- COS,
17
- SIN,
18
- CU_SEQLENS,
19
- SEQLEN_OFFSETS, # this could be int or a pointer
20
- # Matrix dimensions
21
- seqlen,
22
- nheads,
23
- seqlen_ro,
24
- # strides
25
- stride_out_batch,
26
- stride_out_seqlen,
27
- stride_out_nheads,
28
- stride_out_headdim,
29
- stride_x_batch,
30
- stride_x_seqlen,
31
- stride_x_nheads,
32
- stride_x_headdim,
33
- # Meta-parameters
34
- # We want ROTARY_DIM to be constexpr, otherwise the triton compiler doesn't know that
35
- # the mask is constant every 8 elements, and it will generate LDG.16 instead of LDG.128
36
- ROTARY_DIM: tl.constexpr,
37
- IS_SEQLEN_OFFSETS_TENSOR: tl.constexpr,
38
- IS_VARLEN: tl.constexpr,
39
- INTERLEAVED: tl.constexpr,
40
- CONJUGATE: tl.constexpr,
41
- BLOCK_H: tl.constexpr,
42
- BLOCK_M: tl.constexpr,
43
- ):
44
- BLOCK_K: tl.constexpr = triton.next_power_of_2(ROTARY_DIM)
45
- ROTARY_DIM_HALF = ROTARY_DIM // 2
46
- pid_head = tl.program_id(axis=0)
47
- pid_m = tl.program_id(axis=1)
48
- pid_batch = tl.program_id(axis=2)
49
-
50
- if not IS_VARLEN:
51
- X = X + pid_batch * stride_x_batch
52
- OUT = OUT + pid_batch * stride_out_batch
53
- else:
54
- start_idx = tl.load(CU_SEQLENS + pid_batch)
55
- seqlen = tl.load(CU_SEQLENS + pid_batch + 1) - start_idx
56
- X = X + start_idx * stride_x_seqlen
57
- OUT = OUT + start_idx * stride_out_seqlen
58
-
59
- if pid_m * BLOCK_M >= seqlen:
60
- return
61
-
62
- rh = pid_head * BLOCK_H + tl.arange(0, BLOCK_H)
63
- rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
64
- if not IS_SEQLEN_OFFSETS_TENSOR:
65
- rm_cs = rm + SEQLEN_OFFSETS
66
- else:
67
- rm_cs = rm + tl.load(SEQLEN_OFFSETS + pid_batch)
68
-
69
- rk_half = tl.arange(0, BLOCK_K // 2)
70
- COS = COS + (rm_cs[:, None] * ROTARY_DIM_HALF + rk_half[None, :])
71
- SIN = SIN + (rm_cs[:, None] * ROTARY_DIM_HALF + rk_half[None, :])
72
- mask_cs = (rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < ROTARY_DIM_HALF)
73
- cos = tl.load(COS, mask=mask_cs, other=1.0).to(tl.float32)
74
- sin = tl.load(SIN, mask=mask_cs, other=0.0).to(tl.float32)
75
- if CONJUGATE:
76
- sin = -sin
77
-
78
- if not INTERLEAVED:
79
- # Load the 1st and 2nd halves of X, do calculation, then store to 1st and 2nd halves of OUT
80
- X = X + (rh[:, None, None] * stride_x_nheads + rm[None, :, None] * stride_x_seqlen + rk_half[None, None, :] * stride_x_headdim)
81
- OUT = OUT + (rh[:, None, None] * stride_out_nheads + rm[None, :, None] * stride_out_seqlen + rk_half[None, None, :] * stride_out_headdim)
82
- mask = (rh[:, None, None] < nheads) & (rm[None, :, None] < seqlen) & (rk_half[None, None, :] < ROTARY_DIM_HALF)
83
- x0 = tl.load(X, mask=mask, other=0.0).to(tl.float32)
84
- x1 = tl.load(X + ROTARY_DIM_HALF * stride_x_headdim, mask=mask, other=0.0,).to(tl.float32)
85
- o0 = x0 * cos - x1 * sin
86
- o1 = x0 * sin + x1 * cos
87
- tl.store(OUT, o0, mask=mask)
88
- tl.store(OUT + ROTARY_DIM_HALF * stride_out_headdim, o1, mask=mask)
89
- else:
90
- rk = tl.arange(0, BLOCK_K)
91
- X = X + (rh[:, None, None] * stride_x_nheads + rm[None, :, None] * stride_x_seqlen + rk[None, None, :] * stride_x_headdim)
92
- OUT = OUT + (rh[:, None, None] * stride_out_nheads + rm[None, :, None] * stride_out_seqlen + rk[None, None, :] * stride_out_headdim)
93
- mask = (rh[:, None, None] < nheads) & (rm[None, :, None] < seqlen) & (rk[None, None, :] < ROTARY_DIM)
94
- x = tl.load(X, mask=mask, other=0.0).to(tl.float32)
95
- x0, x1 = tl.split(tl.reshape(x, [BLOCK_H, BLOCK_M, BLOCK_K // 2, 2]))
96
- o0 = x0 * cos - x1 * sin
97
- o1 = x0 * sin + x1 * cos
98
- o = tl.reshape(tl.join(o0, o1), [BLOCK_H, BLOCK_M, BLOCK_K])
99
- tl.store(OUT, o, mask=mask)
100
-
101
-
102
- def apply_rotary(
103
- x: torch.Tensor,
104
- cos: torch.Tensor,
105
- sin: torch.Tensor,
106
- seqlen_offsets: Union[int, torch.Tensor] = 0,
107
- cu_seqlens: Optional[torch.Tensor] = None,
108
- max_seqlen: Optional[int] = None,
109
- interleaved=False,
110
- inplace=False,
111
- conjugate=False,
112
- ) -> torch.Tensor:
113
- """
114
- Arguments:
115
- x: (batch, seqlen, nheads, headdim) if cu_seqlens is None
116
- else (total_seqlen, nheads, headdim).
117
- cos: (seqlen_ro, rotary_dim / 2)
118
- sin: (seqlen_ro, rotary_dim / 2)
119
- seqlen_offsets: integer or integer tensor of size (batch,)
120
- cu_seqlens: (batch + 1,) or None
121
- max_seqlen: int
122
- Returns:
123
- y: (batch, seqlen, nheads, headdim)
124
- """
125
- is_varlen = cu_seqlens is not None
126
- if not is_varlen:
127
- batch, seqlen, nheads, headdim = x.shape
128
- else:
129
- assert max_seqlen is not None, "If cu_seqlens is passed in, then max_seqlen must be passed"
130
- total_seqlen, nheads, headdim = x.shape
131
- batch_p_1 = cu_seqlens.shape[0]
132
- batch = batch_p_1 - 1
133
- seqlen = max_seqlen
134
- seqlen_ro, rotary_dim = cos.shape
135
- assert sin.shape == cos.shape
136
- rotary_dim *= 2
137
- assert rotary_dim <= headdim, "rotary_dim must be <= headdim"
138
- assert headdim <= 256, "Only support headdim <= 256"
139
- assert seqlen_ro >= seqlen, "seqlen_ro must be >= seqlen"
140
-
141
- cos, sin = cos.contiguous(), sin.contiguous()
142
- if isinstance(seqlen_offsets, torch.Tensor):
143
- assert seqlen_offsets.shape == (batch,)
144
- assert seqlen_offsets.dtype in [torch.int32, torch.int64]
145
- seqlen_offsets = seqlen_offsets.contiguous()
146
- else:
147
- assert seqlen_offsets + seqlen <= seqlen_ro
148
-
149
- output = torch.empty_like(x) if not inplace else x
150
- if rotary_dim < headdim and not inplace:
151
- output[..., rotary_dim:].copy_(x[..., rotary_dim:])
152
-
153
- grid = lambda META: (triton.cdiv(nheads, META["BLOCK_H"]), triton.cdiv(seqlen, META["BLOCK_M"]), batch) # noqa
154
- BLOCK_M = 8 if rotary_dim <= 128 else 4
155
-
156
- # Need this, otherwise Triton tries to launch from cuda:0 and we get
157
- # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?)
158
- with torch.cuda.device(x.device.index):
159
- torch.library.wrap_triton(rotary_kernel)[grid](
160
- output, # data ptrs
161
- x,
162
- cos,
163
- sin,
164
- cu_seqlens,
165
- seqlen_offsets,
166
- seqlen, # shapes
167
- nheads,
168
- seqlen_ro,
169
- output.stride(0) if not is_varlen else 0, # batch_strides if not varlen else 0
170
- output.stride(-3), # seqlen_stride or total_seqlen_stride
171
- output.stride(-2), # nheads_stride
172
- output.stride(-1), # headdim_stride
173
- x.stride(0) if not is_varlen else 0, # batch_strides if not varlen else 0
174
- x.stride(-3), # seqlen stride or total_seqlen_stride
175
- x.stride(-2), # nheads stride
176
- x.stride(-1), # headdim stride
177
- rotary_dim,
178
- isinstance(seqlen_offsets, torch.Tensor),
179
- is_varlen,
180
- interleaved,
181
- conjugate,
182
- BLOCK_M=BLOCK_M,
183
- BLOCK_H=2,
184
- )
185
- return output
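
A brief sketch of calling the apply_rotary helper deleted above. The shapes are illustrative, and the cos/sin construction below is the usual rotary cache with base 10000, which is an assumption rather than anything defined in this file; only the (seqlen_ro, rotary_dim / 2) shape is required by the docstring.

import torch
from flash_attn.ops.triton.rotary import apply_rotary

batch, seqlen, nheads, headdim = 2, 128, 16, 64
rotary_dim = 64  # must be <= headdim

x = torch.randn(batch, seqlen, nheads, headdim, device="cuda", dtype=torch.float16)
# cos/sin have shape (seqlen_ro, rotary_dim / 2)
t = torch.arange(seqlen, device="cuda", dtype=torch.float32)
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, rotary_dim, 2, device="cuda").float() / rotary_dim))
freqs = torch.outer(t, inv_freq)
cos, sin = freqs.cos(), freqs.sin()

y = apply_rotary(x, cos, sin, seqlen_offsets=0, interleaved=False)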
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/__init__.py DELETED
@@ -1,393 +0,0 @@
1
- from typing import Optional, List
2
- import torch
3
- from ._ops import ops as flash_attn_ops
4
- from .flash_attn_interface import (
5
- flash_attn_func,
6
- flash_attn_kvpacked_func,
7
- flash_attn_qkvpacked_func,
8
- flash_attn_varlen_func,
9
- flash_attn_varlen_kvpacked_func,
10
- flash_attn_varlen_qkvpacked_func,
11
- flash_attn_with_kvcache,
12
- )
13
-
14
-
15
- def fwd(
16
- q: torch.Tensor,
17
- k: torch.Tensor,
18
- v: torch.Tensor,
19
- out: Optional[torch.Tensor] = None,
20
- alibi_slopes: Optional[torch.Tensor] = None,
21
- p_dropout: float = 0.0,
22
- softmax_scale: Optional[float] = None,
23
- is_causal: bool = False,
24
- window_size_left: int = -1,
25
- window_size_right: int = -1,
26
- softcap: float = 0.0,
27
- return_softmax: bool = False,
28
- gen: Optional[torch.Generator] = None,
29
- ) -> List[torch.Tensor]:
30
- """
31
- Forward pass for multi-head attention.
32
-
33
- Args:
34
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
35
- k: Key tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
36
- v: Value tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
37
- out: Optional output tensor, same shape as q
38
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
39
- p_dropout: Dropout probability
40
- softmax_scale: Scale factor for softmax
41
- is_causal: Whether to use causal attention
42
- window_size_left: Window size for left context (-1 for unlimited)
43
- window_size_right: Window size for right context (-1 for unlimited)
44
- softcap: Soft cap for attention weights
45
- return_softmax: Whether to return softmax weights
46
- gen: Optional random number generator
47
-
48
- Returns:
49
- List of tensors: [output, softmax_lse, (softmax if return_softmax)]
50
- """
51
- if softmax_scale is None:
52
- attention_head_dim = q.shape[-1]
53
- softmax_scale = 1.0 / (attention_head_dim**0.5)
54
-
55
- return flash_attn_ops.fwd(
56
- q,
57
- k,
58
- v,
59
- out,
60
- alibi_slopes,
61
- p_dropout,
62
- softmax_scale,
63
- is_causal,
64
- window_size_left,
65
- window_size_right,
66
- softcap,
67
- return_softmax,
68
- gen,
69
- )
70
-
71
-
72
- def varlen_fwd(
73
- q: torch.Tensor,
74
- k: torch.Tensor,
75
- v: torch.Tensor,
76
- cu_seqlens_q: torch.Tensor,
77
- cu_seqlens_k: torch.Tensor,
78
- out: Optional[torch.Tensor] = None,
79
- seqused_k: Optional[torch.Tensor] = None,
80
- leftpad_k: Optional[torch.Tensor] = None,
81
- block_table: Optional[torch.Tensor] = None,
82
- alibi_slopes: Optional[torch.Tensor] = None,
83
- max_seqlen_q: int = 0,
84
- max_seqlen_k: int = 0,
85
- p_dropout: float = 0.0,
86
- softmax_scale: Optional[float] = None,
87
- zero_tensors: bool = False,
88
- is_causal: bool = False,
89
- window_size_left: int = -1,
90
- window_size_right: int = -1,
91
- softcap: float = 0.0,
92
- return_softmax: bool = False,
93
- gen: Optional[torch.Generator] = None,
94
- ) -> List[torch.Tensor]:
95
- """
96
- Forward pass for multi-head attention with variable sequence lengths.
97
-
98
- Args:
99
- q: Query tensor of shape [total_q, num_heads, head_size]
100
- k: Key tensor of shape [total_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
101
- v: Value tensor of shape [total_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
102
- cu_seqlens_q: Cumulative sequence lengths for queries of shape [batch_size+1]
103
- cu_seqlens_k: Cumulative sequence lengths for keys of shape [batch_size+1]
104
- out: Optional output tensor of shape [total_q, num_heads, head_size]
105
- seqused_k: Optional tensor specifying how many keys to use per batch element [batch_size]
106
- leftpad_k: Optional left padding for keys of shape [batch_size]
107
- block_table: Optional block table of shape [batch_size, max_num_blocks_per_seq]
108
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
109
- max_seqlen_q: Maximum sequence length for queries
110
- max_seqlen_k: Maximum sequence length for keys
111
- p_dropout: Dropout probability
112
- softmax_scale: Scale factor for softmax
113
- zero_tensors: Whether to zero tensors before computation
114
- is_causal: Whether to use causal attention
115
- window_size_left: Window size for left context (-1 for unlimited)
116
- window_size_right: Window size for right context (-1 for unlimited)
117
- softcap: Soft cap for attention weights
118
- return_softmax: Whether to return softmax weights
119
- gen: Optional random number generator
120
-
121
- Returns:
122
- List of tensors: [output, softmax_lse, (softmax if return_softmax)]
123
- """
124
- if softmax_scale is None:
125
- attention_head_dim = q.shape[-1]
126
- softmax_scale = 1.0 / (attention_head_dim**0.5)
127
-
128
- return flash_attn_ops.varlen_fwd(
129
- q,
130
- k,
131
- v,
132
- out,
133
- cu_seqlens_q,
134
- cu_seqlens_k,
135
- seqused_k,
136
- leftpad_k,
137
- block_table,
138
- alibi_slopes,
139
- max_seqlen_q,
140
- max_seqlen_k,
141
- p_dropout,
142
- softmax_scale,
143
- zero_tensors,
144
- is_causal,
145
- window_size_left,
146
- window_size_right,
147
- softcap,
148
- return_softmax,
149
- gen,
150
- )
151
-
152
-
153
- def bwd(
154
- dout: torch.Tensor,
155
- q: torch.Tensor,
156
- k: torch.Tensor,
157
- v: torch.Tensor,
158
- out: torch.Tensor,
159
- softmax_lse: torch.Tensor,
160
- dq: Optional[torch.Tensor] = None,
161
- dk: Optional[torch.Tensor] = None,
162
- dv: Optional[torch.Tensor] = None,
163
- alibi_slopes: Optional[torch.Tensor] = None,
164
- p_dropout: float = 0.0,
165
- softmax_scale: Optional[float] = None,
166
- is_causal: bool = False,
167
- window_size_left: int = -1,
168
- window_size_right: int = -1,
169
- softcap: float = 0.0,
170
- deterministic: bool = False,
171
- gen: Optional[torch.Generator] = None,
172
- rng_state: Optional[torch.Tensor] = None,
173
- ) -> List[torch.Tensor]:
174
- """
175
- Backward pass for multi-head attention.
176
-
177
- Args:
178
- dout: Gradient tensor of shape [batch_size, seqlen_q, num_heads, head_size]
179
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
180
- k: Key tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
181
- v: Value tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
182
- out: Output tensor from forward pass of shape [batch_size, seqlen_q, num_heads, head_size]
183
- softmax_lse: Log-sum-exp values from forward pass of shape [batch_size, num_heads, seqlen_q]
184
- dq: Optional gradient tensor for queries, same shape as q
185
- dk: Optional gradient tensor for keys, same shape as k
186
- dv: Optional gradient tensor for values, same shape as v
187
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
188
- p_dropout: Dropout probability
189
- softmax_scale: Scale factor for softmax
190
- is_causal: Whether to use causal attention
191
- window_size_left: Window size for left context (-1 for unlimited)
192
- window_size_right: Window size for right context (-1 for unlimited)
193
- softcap: Soft cap for attention weights
194
- deterministic: Whether to use deterministic algorithms
195
- gen: Optional random number generator
196
- rng_state: Optional RNG state from forward pass
197
-
198
- Returns:
199
- List of tensors: [dq, dk, dv]
200
- """
201
- if softmax_scale is None:
202
- attention_head_dim = q.shape[-1]
203
- softmax_scale = 1.0 / (attention_head_dim**0.5)
204
-
205
- return flash_attn_ops.bwd(
206
- dout,
207
- q,
208
- k,
209
- v,
210
- out,
211
- softmax_lse,
212
- dq,
213
- dk,
214
- dv,
215
- alibi_slopes,
216
- p_dropout,
217
- softmax_scale,
218
- is_causal,
219
- window_size_left,
220
- window_size_right,
221
- softcap,
222
- deterministic,
223
- gen,
224
- rng_state,
225
- )
226
-
227
-
228
- def varlen_bwd(
229
- dout: torch.Tensor,
230
- q: torch.Tensor,
231
- k: torch.Tensor,
232
- v: torch.Tensor,
233
- out: torch.Tensor,
234
- softmax_lse: torch.Tensor,
235
- cu_seqlens_q: torch.Tensor,
236
- cu_seqlens_k: torch.Tensor,
237
- dq: Optional[torch.Tensor] = None,
238
- dk: Optional[torch.Tensor] = None,
239
- dv: Optional[torch.Tensor] = None,
240
- alibi_slopes: Optional[torch.Tensor] = None,
241
- max_seqlen_q: int = 0,
242
- max_seqlen_k: int = 0,
243
- p_dropout: float = 0.0,
244
- softmax_scale: Optional[float] = None,
245
- zero_tensors: bool = False,
246
- is_causal: bool = False,
247
- window_size_left: int = -1,
248
- window_size_right: int = -1,
249
- softcap: float = 0.0,
250
- deterministic: bool = False,
251
- gen: Optional[torch.Generator] = None,
252
- rng_state: Optional[torch.Tensor] = None,
253
- ) -> List[torch.Tensor]:
254
- """
255
- Backward pass for multi-head attention with variable sequence lengths.
256
-
257
- Args:
258
- dout: Gradient tensor of shape [batch_size, seqlen_q, num_heads, head_size]
259
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
260
- k: Key tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
261
- v: Value tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
262
- out: Output tensor from forward pass of shape [batch_size, seqlen_q, num_heads, head_size]
263
- softmax_lse: Log-sum-exp values from forward pass of shape [batch_size, num_heads, seqlen_q]
264
- cu_seqlens_q: Cumulative sequence lengths for queries of shape [batch_size+1]
265
- cu_seqlens_k: Cumulative sequence lengths for keys of shape [batch_size+1]
266
- dq: Optional gradient tensor for queries, same shape as q
267
- dk: Optional gradient tensor for keys, same shape as k
268
- dv: Optional gradient tensor for values, same shape as v
269
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
270
- max_seqlen_q: Maximum sequence length for queries
271
- max_seqlen_k: Maximum sequence length for keys
272
- p_dropout: Dropout probability
273
- softmax_scale: Scale factor for softmax
274
- zero_tensors: Whether to zero tensors before computation
275
- is_causal: Whether to use causal attention
276
- window_size_left: Window size for left context (-1 for unlimited)
277
- window_size_right: Window size for right context (-1 for unlimited)
- softcap: Soft cap for attention weights
- deterministic: Whether to use deterministic algorithms
- gen: Optional random number generator
- rng_state: Optional RNG state from forward pass
-
- Returns:
- List of tensors: [dq, dk, dv]
- """
- if softmax_scale is None:
- attention_head_dim = q.shape[-1]
- softmax_scale = 1.0 / (attention_head_dim**0.5)
-
- return flash_attn_ops.varlen_bwd(
- dout,
- q,
- k,
- v,
- out,
- softmax_lse,
- dq,
- dk,
- dv,
- cu_seqlens_q,
- cu_seqlens_k,
- alibi_slopes,
- max_seqlen_q,
- max_seqlen_k,
- p_dropout,
- softmax_scale,
- zero_tensors,
- is_causal,
- window_size_left,
- window_size_right,
- softcap,
- deterministic,
- gen,
- rng_state,
- )
-
-
- def fwd_kvcache(
- q: torch.Tensor,
- kcache: torch.Tensor,
- vcache: torch.Tensor,
- k: Optional[torch.Tensor] = None,
- v: Optional[torch.Tensor] = None,
- seqlens_k: Optional[torch.Tensor] = None,
- rotary_cos: Optional[torch.Tensor] = None,
- rotary_sin: Optional[torch.Tensor] = None,
- cache_batch_idx: Optional[torch.Tensor] = None,
- leftpad_k: Optional[torch.Tensor] = None,
- block_table: Optional[torch.Tensor] = None,
- alibi_slopes: Optional[torch.Tensor] = None,
- out: Optional[torch.Tensor] = None,
- softmax_scale: Optional[float] = None,
- is_causal: bool = False,
- window_size_left: int = -1,
- window_size_right: int = -1,
- softcap: float = 0.0,
- is_rotary_interleaved: bool = False,
- num_splits: int = 1,
- ) -> List[torch.Tensor]:
- """
- Forward pass for multi-head attention with KV cache.
-
- Args:
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
- kcache: Key cache tensor of shape [batch_size_c, seqlen_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
- vcache: Value cache tensor of shape [batch_size_c, seqlen_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
- k: Optional new keys tensor of shape [batch_size, seqlen_knew, num_heads_k, head_size]
- v: Optional new values tensor of shape [batch_size, seqlen_knew, num_heads_k, head_size]
- seqlens_k: Optional sequence lengths for keys of shape [batch_size]
- rotary_cos: Optional rotary cosine tensor of shape [seqlen_ro, rotary_dim/2]
- rotary_sin: Optional rotary sine tensor of shape [seqlen_ro, rotary_dim/2]
- cache_batch_idx: Optional indices to index into the KV cache
- leftpad_k: Optional left padding for keys of shape [batch_size]
- block_table: Optional block table of shape [batch_size, max_num_blocks_per_seq]
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
- out: Optional output tensor, same shape as q
- softmax_scale: Scale factor for softmax
- is_causal: Whether to use causal attention
- window_size_left: Window size for left context (-1 for unlimited)
- window_size_right: Window size for right context (-1 for unlimited)
- softcap: Soft cap for attention weights
- is_rotary_interleaved: Whether rotary embeddings are interleaved
- num_splits: Number of splits for computation
-
- Returns:
- List of tensors: [output, softmax_lse]
- """
- if softmax_scale is None:
- attention_head_dim = q.shape[-1]
- softmax_scale = 1.0 / (attention_head_dim**0.5)
-
- return flash_attn_ops.fwd_kvcache(
- q,
- kcache,
- vcache,
- k,
- v,
- seqlens_k,
- rotary_cos,
- rotary_sin,
- cache_batch_idx,
- leftpad_k,
- block_table,
- alibi_slopes,
- out,
- softmax_scale,
- is_causal,
- window_size_left,
- window_size_right,
- softcap,
- is_rotary_interleaved,
- num_splits,
- )
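For context, here is a minimal decoding-step sketch of how the removed fwd_kvcache wrapper above was driven, based only on the signature and docstring shown in this diff. The import path, tensor sizes, and dtype are illustrative assumptions, and a CUDA device plus the compiled extension are required.

import torch
from flash_attn import fwd_kvcache  # assumed import path for the removed wrapper

batch, heads, head_size, cache_len = 2, 8, 64, 1024  # illustrative sizes
q = torch.randn(batch, 1, heads, head_size, device="cuda", dtype=torch.float16)      # one new query token per sequence
kcache = torch.zeros(batch, cache_len, heads, head_size, device="cuda", dtype=torch.float16)
vcache = torch.zeros_like(kcache)
k_new = torch.randn(batch, 1, heads, head_size, device="cuda", dtype=torch.float16)  # keys/values to append to the cache
v_new = torch.randn_like(k_new)
seqlens_k = torch.full((batch,), 128, device="cuda", dtype=torch.int32)              # tokens already present in each cache entry

# softmax_scale is left as None, so the wrapper fills in 1/sqrt(head_size) itself.
out, softmax_lse = fwd_kvcache(q, kcache, vcache, k=k_new, v=v_new, seqlens_k=seqlens_k, is_causal=True)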
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/_flash_attn_876ac68_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:0764a49f4b0f342e5b4f1e42a0ac7ef9035fcd720e34fdb3371c06de7e52c275
- size 447253888
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/_ops.py DELETED
@@ -1,9 +0,0 @@
- import torch
- from . import _flash_attn_876ac68_dirty
- ops = torch.ops._flash_attn_876ac68_dirty
-
- def add_op_namespace_prefix(op_name: str):
- """
- Prefix op by namespace.
- """
- return f"_flash_attn_876ac68_dirty::{op_name}"
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/bert_padding.py DELETED
@@ -1,218 +0,0 @@
1
- # Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
2
-
3
- import torch
4
- import torch.nn.functional as F
5
- from einops import rearrange, repeat
6
-
7
-
8
- class IndexFirstAxis(torch.autograd.Function):
9
- @staticmethod
10
- def forward(ctx, input, indices):
11
- ctx.save_for_backward(indices)
12
- assert input.ndim >= 2
13
- ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
14
- second_dim = other_shape.numel()
15
- # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
16
- # return input[indices]
17
- return torch.gather(
18
- rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)
19
- ).reshape(-1, *other_shape)
20
-
21
- @staticmethod
22
- def backward(ctx, grad_output):
23
- (indices,) = ctx.saved_tensors
24
- assert grad_output.ndim >= 2
25
- other_shape = grad_output.shape[1:]
26
- grad_output = rearrange(grad_output, "b ... -> b (...)")
27
- grad_input = torch.zeros(
28
- [ctx.first_axis_dim, grad_output.shape[1]],
29
- device=grad_output.device,
30
- dtype=grad_output.dtype,
31
- )
32
- # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
33
- # grad_input[indices] = grad_output
34
- grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
35
- return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
36
-
37
-
38
- index_first_axis = IndexFirstAxis.apply
39
-
40
-
41
- class IndexPutFirstAxis(torch.autograd.Function):
42
- @staticmethod
43
- def forward(ctx, values, indices, first_axis_dim):
44
- ctx.save_for_backward(indices)
45
- assert indices.ndim == 1
46
- assert values.ndim >= 2
47
- output = torch.zeros(
48
- first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
49
- )
50
- # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
51
- output[indices] = values
52
- # output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
53
- return output
54
-
55
- @staticmethod
56
- def backward(ctx, grad_output):
57
- (indices,) = ctx.saved_tensors
58
- # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
59
- grad_values = grad_output[indices]
60
- # grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
61
- return grad_values, None, None
62
-
63
-
64
- index_put_first_axis = IndexPutFirstAxis.apply
65
-
66
-
67
- class IndexFirstAxisResidual(torch.autograd.Function):
68
- @staticmethod
69
- def forward(ctx, input, indices):
70
- ctx.save_for_backward(indices)
71
- assert input.ndim >= 2
72
- ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
73
- second_dim = other_shape.numel()
74
- # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
75
- output = input[indices]
76
- # We don't want to reshape input (b ... -> b (...)) since it could change the channel_last
77
- # memory format to channel_first. In other words, input might not be contiguous.
78
- # If we don't detach, PyTorch complains about output being a view that is being modified in place
79
- return output, input.detach()
80
-
81
- @staticmethod
82
- def backward(ctx, grad_output, grad_residual):
83
- (indices,) = ctx.saved_tensors
84
- assert grad_output.ndim >= 2
85
- other_shape = grad_output.shape[1:]
86
- assert grad_residual.shape[1:] == other_shape
87
- grad_input = grad_residual
88
- # grad_input[indices] += grad_output
89
- indices = indices.reshape(indices.shape[0], *((1,) * (grad_output.ndim - 1)))
90
- indices = indices.expand_as(grad_output)
91
- grad_input.scatter_add_(0, indices, grad_output)
92
- return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
93
-
94
-
95
- index_first_axis_residual = IndexFirstAxisResidual.apply
96
-
97
-
98
- def unpad_input(hidden_states, attention_mask, unused_mask=None):
99
- """
100
- Arguments:
101
- hidden_states: (batch, seqlen, ...)
102
- attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
103
- unused_mask: (batch, seqlen), bool / int, 1 means the element is allocated but unused.
104
- Return:
105
- hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask + unused_mask.
106
- indices: (total_nnz), the indices of masked tokens from the flattened input sequence.
107
- cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
108
- max_seqlen_in_batch: int
109
- seqused: (batch), returns the number of tokens selected in attention_mask + unused_mask.
110
- """
111
- all_masks = (attention_mask + unused_mask) if unused_mask is not None else attention_mask
112
- seqlens_in_batch = all_masks.sum(dim=-1, dtype=torch.int32)
113
- used_seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
114
- indices = torch.nonzero(all_masks.flatten(), as_tuple=False).flatten()
115
- max_seqlen_in_batch = seqlens_in_batch.max().item()
116
- cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
117
- # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
118
- # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
119
- # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
120
- # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
121
- # so we write custom forward and backward to make it a bit faster.
122
- return (
123
- index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
124
- indices,
125
- cu_seqlens,
126
- max_seqlen_in_batch,
127
- used_seqlens_in_batch,
128
- )
129
-
130
-
131
- def unpad_input_for_concatenated_sequences(hidden_states, attention_mask_in_length):
132
- """
133
- Supports concatenating short samples in one sequence. The attention_mask_in_length is utilized to mask the other short samples. This enables efficient training on variable-length samples (e.g., the supervised fine-tuning task in large language models).
134
- The motivation for this function is explained [here](https://github.com/Dao-AILab/flash-attention/issues/432#issuecomment-1668822286).
135
-
136
- For example, if batch = 3 and seqlen = 6, the attention_mask_in_length is:
137
- ```
138
- [
139
- [2, 3, 0, 0, 0, 0],
140
- [3, 2, 0, 0, 0, 0],
141
- [6, 0, 0, 0, 0, 0]
142
- ]
143
- ```
144
- , which refers to the 3D-attention mask:
145
- ```
146
- [
147
- [
148
- [1, 0, 0, 0, 0, 0],
149
- [1, 1, 0, 0, 0, 0],
150
- [0, 0, 1, 0, 0, 0],
151
- [0, 0, 1, 1, 0, 0],
152
- [0, 0, 1, 1, 1, 0],
153
- [0, 0, 0, 0, 0, 1]
154
- ],
155
- [
156
- [1, 0, 0, 0, 0, 0],
157
- [1, 1, 0, 0, 0, 0],
158
- [1, 1, 1, 0, 0, 0],
159
- [0, 0, 0, 1, 0, 0],
160
- [0, 0, 0, 1, 1, 0],
161
- [0, 0, 0, 0, 0, 1]
162
- ],
163
- [
164
- [1, 0, 0, 0, 0, 0],
165
- [1, 1, 0, 0, 0, 0],
166
- [1, 1, 1, 0, 0, 0],
167
- [1, 1, 1, 1, 0, 0],
168
- [1, 1, 1, 1, 1, 0],
169
- [1, 1, 1, 1, 1, 1]
170
- ]
171
- ]
172
- ```.
173
-
174
- Arguments:
175
- hidden_states: (batch, seqlen, ...)
176
- attention_mask_in_length: (batch, seqlen), int, a nonzero number (e.g., 1, 2, 3, etc.) means length of concatenated sequence in b-th batch, and 0 means none.
177
- Return:
178
- hidden_states: (total_nnz, ...), where total_nnz = number of tokens in selected in attention_mask.
179
- indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
180
- cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
181
- max_seqlen_in_batch: int
182
- """
183
- length = attention_mask_in_length.sum(dim=-1)
184
- seqlen = attention_mask_in_length.size(-1)
185
- attention_mask_2d = torch.arange(seqlen, device=length.device, dtype=length.dtype).expand(len(length), seqlen) < length.unsqueeze(1)
186
- real_indices_idx = torch.nonzero(attention_mask_in_length.flatten(), as_tuple=False).flatten()
187
- seqlens_in_batch = attention_mask_in_length.flatten()[real_indices_idx]
188
- indices = torch.nonzero(attention_mask_2d.flatten(), as_tuple=False).flatten()
189
- max_seqlen_in_batch = seqlens_in_batch.max().item()
190
- cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
191
- # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
192
- # bool mask, then call nonzero to get the indices, then index with those. The indices is @dim
193
- # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
194
- # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
195
- # so we write custom forward and backward to make it a bit faster.
196
- return (
197
- index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
198
- indices,
199
- cu_seqlens,
200
- max_seqlen_in_batch,
201
- )
202
-
203
-
204
- def pad_input(hidden_states, indices, batch, seqlen):
205
- """
206
- Arguments:
207
- hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
208
- indices: (total_nnz), the indices that represent the non-masked tokens of the original padded input sequence.
209
- batch: int, batch size for the padded sequence.
210
- seqlen: int, maximum sequence length for the padded sequence.
211
- Return:
212
- hidden_states: (batch, seqlen, ...)
213
- """
214
- dim = hidden_states.shape[-1]
215
- # output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
216
- # output[indices] = hidden_states
217
- output = index_put_first_axis(hidden_states, indices, batch * seqlen)
218
- return rearrange(output, "(b s) ... -> b s ...", b=batch)
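As a reminder of how the deleted padding helpers fit together, here is a minimal unpad/re-pad round trip written against the unpad_input and pad_input definitions above. The import path and toy sizes are assumptions, and the varlen attention call that would normally consume the packed rows is omitted.

import torch
from flash_attn.bert_padding import unpad_input, pad_input  # assumed import path for the removed module

batch, seqlen, dim = 2, 4, 8
hidden_states = torch.randn(batch, seqlen, dim)
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]], dtype=torch.int32)

# Pack only the valid positions into (total_nnz, dim).
unpadded, indices, cu_seqlens, max_seqlen, seqused = unpad_input(hidden_states, attention_mask)
assert cu_seqlens.tolist() == [0, 3, 5] and seqused.tolist() == [3, 2]

# ... a varlen attention kernel would consume (unpadded, cu_seqlens, max_seqlen) here ...

# Scatter the packed rows back to the padded layout; padded positions come back as zeros.
repadded = pad_input(unpadded, indices, batch, seqlen)
assert repadded.shape == (batch, seqlen, dim)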
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/flash_attn_interface.py DELETED
@@ -1,1609 +0,0 @@
1
- # Copyright (c) 2023, Tri Dao.
2
-
3
- from typing import Optional, Sequence, Tuple, Union
4
-
5
- import torch
6
- import torch.nn as nn
7
- import os
8
-
9
- # # isort: off
10
- # # We need to import the CUDA kernels after importing torch
11
- # USE_TRITON_ROCM = os.getenv("FLASH_ATTENTION_TRITON_AMD_ENABLE", "FALSE") == "TRUE"
12
- # if USE_TRITON_ROCM:
13
- # from .flash_attn_triton_amd import interface_fa as flash_attn_gpu
14
- # else:
15
- # import flash_attn_2_cuda as flash_attn_gpu
16
-
17
-
18
- from ._ops import ops as flash_attn_gpu
19
-
20
- # # isort: on
21
-
22
- def maybe_contiguous(x):
23
- return x.contiguous() if x is not None and x.stride(-1) != 1 else x
24
-
25
-
26
- def _get_block_size_n(device, head_dim, is_dropout, is_causal):
27
- # This should match the block sizes in the CUDA kernel
28
- assert head_dim <= 256
29
- major, minor = torch.cuda.get_device_capability(device)
30
- is_sm8x = major == 8 and minor > 0 # Only include sm86 and sm89, exclude sm80 (A100)
31
- is_sm80 = major == 8 and minor == 0
32
- is_sm90 = major == 9 and minor == 0
33
- if head_dim <= 32:
34
- return 128
35
- if head_dim <= 64:
36
- return 128 if not is_dropout else 64
37
- elif head_dim <= 96:
38
- return 64
39
- elif head_dim <= 128:
40
- if is_sm8x:
41
- return 64 if (not is_dropout and is_causal) else 32
42
- else:
43
- return 64 if not is_dropout else 32
44
- elif head_dim <= 192:
45
- return 64
46
- elif head_dim <= 224:
47
- return 64
48
- elif head_dim <= 256:
49
- return 64
50
-
51
-
52
- def round_multiple(x, m):
53
- return (x + m - 1) // m * m
54
-
55
-
56
- # torch.compile() support is only enabled for pytorch >= 2.4
57
- # The reason for this is that we are using the new custom_op and register_fake
58
- # APIs, which support inplace modification of inputs in the function itself
59
- if torch.__version__ >= "2.4.0":
60
- _torch_custom_op_wrapper = torch.library.custom_op
61
- _torch_register_fake_wrapper = torch.library.register_fake
62
- else:
63
- def noop_custom_op_wrapper(name, fn=None, /, *, mutates_args, device_types=None, schema=None):
64
- def wrap(func):
65
- return func
66
- if fn is None:
67
- return wrap
68
- return fn
69
- def noop_register_fake_wrapper(op, fn=None, /, *, lib=None, _stacklevel=1):
70
- def wrap(func):
71
- return func
72
- if fn is None:
73
- return wrap
74
- return fn
75
- _torch_custom_op_wrapper = noop_custom_op_wrapper
76
- _torch_register_fake_wrapper = noop_register_fake_wrapper
77
-
78
-
79
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_forward", mutates_args=(), device_types="cuda")
80
- def _flash_attn_forward(
81
- q: torch.Tensor,
82
- k: torch.Tensor,
83
- v: torch.Tensor,
84
- dropout_p: float,
85
- softmax_scale: float,
86
- causal: bool,
87
- window_size_left: int,
88
- window_size_right: int,
89
- softcap: float,
90
- alibi_slopes: Optional[torch.Tensor],
91
- return_softmax: bool
92
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
93
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
94
- out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.fwd(
95
- q,
96
- k,
97
- v,
98
- None,
99
- alibi_slopes,
100
- dropout_p,
101
- softmax_scale,
102
- causal,
103
- window_size_left,
104
- window_size_right,
105
- softcap,
106
- return_softmax,
107
- None,
108
- )
109
- return out, softmax_lse, S_dmask, rng_state
110
-
111
-
112
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_forward")
113
- def _flash_attn_forward_fake(
114
- q: torch.Tensor,
115
- k: torch.Tensor,
116
- v: torch.Tensor,
117
- dropout_p: float,
118
- softmax_scale: float,
119
- causal: bool,
120
- window_size_left: int,
121
- window_size_right: int,
122
- softcap: float,
123
- alibi_slopes: Optional[torch.Tensor],
124
- return_softmax: bool
125
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
126
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
127
- batch_size, seqlen_q, num_heads, head_size = q.shape
128
- seqlen_k = k.shape[1]
129
- out = torch.empty_like(q)
130
- softmax_lse = torch.empty((batch_size, num_heads, seqlen_q), dtype=torch.float32, device=q.device, layout=q.layout)
131
- p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
132
- if return_softmax:
133
- p = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128), round_multiple(seqlen_k, 128)), dtype=q.dtype, device=q.device, layout=q.layout)
134
- rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
135
-
136
- return out, softmax_lse, p, rng_state
137
-
138
-
139
- if torch.__version__ >= "2.4.0":
140
- _wrapped_flash_attn_forward = torch.ops.flash_attn._flash_attn_forward
141
- else:
142
- _wrapped_flash_attn_forward = _flash_attn_forward
143
-
144
-
145
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_forward", mutates_args=(), device_types="cuda")
146
- def _flash_attn_varlen_forward(
147
- q: torch.Tensor,
148
- k: torch.Tensor,
149
- v: torch.Tensor,
150
- cu_seqlens_q: torch.Tensor,
151
- cu_seqlens_k: torch.Tensor,
152
- max_seqlen_q: int,
153
- max_seqlen_k: int,
154
- dropout_p: float,
155
- softmax_scale: float,
156
- causal: bool,
157
- window_size_left: int = -1,
158
- window_size_right: int = -1,
159
- softcap: float = 0.0,
160
- alibi_slopes: Optional[torch.Tensor] = None,
161
- return_softmax: bool = False,
162
- block_table: Optional[torch.Tensor] = None,
163
- leftpad_k: Optional[torch.Tensor] = None,
164
- seqused_k: Optional[torch.Tensor] = None,
165
- zero_tensors: bool = False,
166
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
167
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
168
- out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.varlen_fwd(
169
- q,
170
- k,
171
- v,
172
- None,
173
- cu_seqlens_q,
174
- cu_seqlens_k,
175
- seqused_k,
176
- leftpad_k,
177
- block_table,
178
- alibi_slopes,
179
- max_seqlen_q,
180
- max_seqlen_k,
181
- dropout_p,
182
- softmax_scale,
183
- zero_tensors,
184
- causal,
185
- window_size_left,
186
- window_size_right,
187
- softcap,
188
- return_softmax,
189
- None,
190
- )
191
- # if out.isnan().any() or softmax_lse.isnan().any():
192
- # breakpoint()
193
- return out, softmax_lse, S_dmask, rng_state
194
-
195
-
196
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_forward")
197
- def _flash_attn_varlen_forward_fake(
198
- q: torch.Tensor,
199
- k: torch.Tensor,
200
- v: torch.Tensor,
201
- cu_seqlens_q: torch.Tensor,
202
- cu_seqlens_k: torch.Tensor,
203
- max_seqlen_q: int,
204
- max_seqlen_k: int,
205
- dropout_p: float,
206
- softmax_scale: float,
207
- causal: bool,
208
- window_size_left: int = -1,
209
- window_size_right: int = -1,
210
- softcap: float = 0.0,
211
- alibi_slopes: Optional[torch.Tensor] = None,
212
- return_softmax: bool = False,
213
- block_table: Optional[torch.Tensor] = None,
214
- leftpad_k: Optional[torch.Tensor] = None,
215
- seqused_k: Optional[torch.Tensor] = None,
216
- zero_tensors: bool = False,
217
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
218
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
219
- paged_kv = block_table is not None
220
- batch_size = cu_seqlens_q.numel() - 1
221
- total_q, num_heads, _ = q.shape
222
-
223
- out = torch.empty_like(q)
224
- softmax_lse = torch.empty((num_heads, total_q), dtype=torch.float32, device=q.device, layout=q.layout)
225
- p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
226
- seqlen_q_rounded = round_multiple(max_seqlen_q, 128)
227
- seqlen_k_rounded = round_multiple(max_seqlen_k, 128)
228
- if return_softmax:
229
- p = torch.empty((batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded), dtype=q.dtype, device=q.device, layout=q.layout)
230
- rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
231
- return out, softmax_lse, p, rng_state
232
-
233
-
234
- if torch.__version__ >= "2.4.0":
235
- _wrapped_flash_attn_varlen_forward = torch.ops.flash_attn._flash_attn_varlen_forward
236
- else:
237
- _wrapped_flash_attn_varlen_forward = _flash_attn_varlen_forward
238
-
239
-
240
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
241
- def _flash_attn_backward(
242
- dout: torch.Tensor,
243
- q: torch.Tensor,
244
- k: torch.Tensor,
245
- v: torch.Tensor,
246
- out: torch.Tensor,
247
- softmax_lse: torch.Tensor,
248
- dq: Optional[torch.Tensor],
249
- dk: Optional[torch.Tensor],
250
- dv: Optional[torch.Tensor],
251
- dropout_p: float,
252
- softmax_scale: float,
253
- causal: bool,
254
- window_size_left: int,
255
- window_size_right: int,
256
- softcap: float,
257
- alibi_slopes: Optional[torch.Tensor],
258
- deterministic: bool,
259
- rng_state: Optional[torch.Tensor] = None,
260
- ) -> torch.Tensor:
261
- # dq, dk, dv are allocated by us so they should already be contiguous
262
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
263
- (
264
- dq,
265
- dk,
266
- dv,
267
- softmax_d,
268
- ) = flash_attn_gpu.bwd(
269
- dout,
270
- q,
271
- k,
272
- v,
273
- out,
274
- softmax_lse,
275
- dq,
276
- dk,
277
- dv,
278
- alibi_slopes,
279
- dropout_p,
280
- softmax_scale,
281
- causal,
282
- window_size_left,
283
- window_size_right,
284
- softcap,
285
- deterministic,
286
- None,
287
- rng_state,
288
- )
289
- return softmax_d
290
-
291
-
292
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_backward")
293
- def _flash_attn_backward_fake(
294
- dout: torch.Tensor,
295
- q: torch.Tensor,
296
- k: torch.Tensor,
297
- v: torch.Tensor,
298
- out: torch.Tensor,
299
- softmax_lse: torch.Tensor,
300
- dq: Optional[torch.Tensor],
301
- dk: Optional[torch.Tensor],
302
- dv: Optional[torch.Tensor],
303
- dropout_p: float,
304
- softmax_scale: float,
305
- causal: bool,
306
- window_size_left: int,
307
- window_size_right: int,
308
- softcap: float,
309
- alibi_slopes: Optional[torch.Tensor],
310
- deterministic: bool,
311
- rng_state: Optional[torch.Tensor] = None,
312
- ) -> torch.Tensor:
313
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
314
- if dq is None:
315
- dq = torch.empty_like(q)
316
- if dk is None:
317
- dk = torch.empty_like(k)
318
- if dv is None:
319
- dv = torch.empty_like(v)
320
- batch_size, seqlen_q, num_heads, _ = q.shape
321
- softmax_d = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128)), device=q.device, dtype=torch.float32)
322
-
323
- return softmax_d
324
-
325
-
326
- if torch.__version__ >= "2.4.0":
327
- _wrapped_flash_attn_backward = torch.ops.flash_attn._flash_attn_backward
328
- else:
329
- _wrapped_flash_attn_backward = _flash_attn_backward
330
-
331
-
332
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
333
- def _flash_attn_varlen_backward(
334
- dout: torch.Tensor,
335
- q: torch.Tensor,
336
- k: torch.Tensor,
337
- v: torch.Tensor,
338
- out: torch.Tensor,
339
- softmax_lse: torch.Tensor,
340
- dq: Optional[torch.Tensor],
341
- dk: Optional[torch.Tensor],
342
- dv: Optional[torch.Tensor],
343
- cu_seqlens_q: torch.Tensor,
344
- cu_seqlens_k: torch.Tensor,
345
- max_seqlen_q: int,
346
- max_seqlen_k: int,
347
- dropout_p: float,
348
- softmax_scale: float,
349
- causal: bool,
350
- window_size_left: int,
351
- window_size_right: int,
352
- softcap: float,
353
- alibi_slopes: Optional[torch.Tensor],
354
- deterministic: bool,
355
- rng_state: Optional[torch.Tensor] = None,
356
- zero_tensors: bool = False,
357
- ) -> torch.Tensor:
358
- # dq, dk, dv are allocated by us so they should already be contiguous
359
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
360
- (
361
- dq,
362
- dk,
363
- dv,
364
- softmax_d,
365
- ) = flash_attn_gpu.varlen_bwd(
366
- dout,
367
- q,
368
- k,
369
- v,
370
- out,
371
- softmax_lse,
372
- dq,
373
- dk,
374
- dv,
375
- cu_seqlens_q,
376
- cu_seqlens_k,
377
- alibi_slopes,
378
- max_seqlen_q,
379
- max_seqlen_k,
380
- dropout_p,
381
- softmax_scale,
382
- zero_tensors,
383
- causal,
384
- window_size_left,
385
- window_size_right,
386
- softcap,
387
- deterministic,
388
- None,
389
- rng_state,
390
- )
391
- # if dk.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
392
- # breakpoint()
393
- return softmax_d
394
-
395
-
396
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_backward")
397
- def _flash_attn_varlen_backward_fake(
398
- dout: torch.Tensor,
399
- q: torch.Tensor,
400
- k: torch.Tensor,
401
- v: torch.Tensor,
402
- out: torch.Tensor,
403
- softmax_lse: torch.Tensor,
404
- dq: Optional[torch.Tensor],
405
- dk: Optional[torch.Tensor],
406
- dv: Optional[torch.Tensor],
407
- cu_seqlens_q: torch.Tensor,
408
- cu_seqlens_k: torch.Tensor,
409
- max_seqlen_q: int,
410
- max_seqlen_k: int,
411
- dropout_p: float,
412
- softmax_scale: float,
413
- causal: bool,
414
- window_size_left: int,
415
- window_size_right: int,
416
- softcap: float,
417
- alibi_slopes: Optional[torch.Tensor],
418
- deterministic: bool,
419
- rng_state: Optional[torch.Tensor] = None,
420
- zero_tensors: bool = False,
421
- ) -> torch.Tensor:
422
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
423
- batch_size = cu_seqlens_q.numel() - 1
424
- total_q, num_heads, _ = q.shape
425
-
426
- if dq is None:
427
- dq = torch.empty_like(q)
428
- if dk is None:
429
- dk = torch.empty_like(k)
430
- if dv is None:
431
- dv = torch.empty_like(v)
432
- softmax_d = torch.empty((num_heads, total_q + 128 * batch_size), device=q.device, dtype=torch.float32)
433
-
434
- return softmax_d
435
-
436
-
437
- if torch.__version__ >= "2.4.0":
438
- _wrapped_flash_attn_varlen_backward = torch.ops.flash_attn._flash_attn_varlen_backward
439
- else:
440
- _wrapped_flash_attn_varlen_backward = _flash_attn_varlen_backward
441
-
442
-
443
- class FlashAttnQKVPackedFunc(torch.autograd.Function):
444
- @staticmethod
445
- def forward(
446
- ctx,
447
- qkv,
448
- dropout_p,
449
- softmax_scale,
450
- causal,
451
- window_size,
452
- softcap,
453
- alibi_slopes,
454
- deterministic,
455
- return_softmax,
456
- is_grad_enabled,
457
- ):
458
- is_grad = is_grad_enabled and qkv.requires_grad
459
- if softmax_scale is None:
460
- softmax_scale = qkv.shape[-1] ** (-0.5)
461
- q, k, v = qkv[:, :, 0].detach(), qkv[:, :, 1].detach(), qkv[:, :, 2].detach()
462
- head_size_og = q.size(3)
463
- if head_size_og % 8 != 0:
464
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
465
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
466
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
467
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
468
- q,
469
- k,
470
- v,
471
- dropout_p,
472
- softmax_scale,
473
- causal=causal,
474
- window_size_left=window_size[0],
475
- window_size_right=window_size[1],
476
- softcap=softcap,
477
- alibi_slopes=alibi_slopes,
478
- return_softmax=return_softmax and dropout_p > 0,
479
- )
480
- if is_grad:
481
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
482
- ctx.dropout_p = dropout_p
483
- ctx.softmax_scale = softmax_scale
484
- ctx.causal = causal
485
- ctx.window_size = window_size
486
- ctx.softcap = softcap
487
- ctx.alibi_slopes = alibi_slopes
488
- ctx.deterministic = deterministic
489
- out = out_padded[..., :head_size_og]
490
- return out if not return_softmax else (out, softmax_lse, S_dmask)
491
-
492
- @staticmethod
493
- def backward(ctx, dout, *args):
494
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
495
- qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
496
- dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
497
- head_size_og = dout.size(3)
498
- dout_padded = dout
499
- if head_size_og % 8 != 0:
500
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
501
- _wrapped_flash_attn_backward(
502
- dout_padded,
503
- q,
504
- k,
505
- v,
506
- out,
507
- softmax_lse,
508
- dqkv[:, :, 0],
509
- dqkv[:, :, 1],
510
- dqkv[:, :, 2],
511
- ctx.dropout_p,
512
- ctx.softmax_scale,
513
- ctx.causal,
514
- ctx.window_size[0],
515
- ctx.window_size[1],
516
- ctx.softcap,
517
- ctx.alibi_slopes,
518
- ctx.deterministic,
519
- rng_state=rng_state,
520
- )
521
- dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
522
- return dqkv, None, None, None, None, None, None, None, None, None
523
-
524
-
525
- class FlashAttnVarlenQKVPackedFunc(torch.autograd.Function):
526
- @staticmethod
527
- def forward(
528
- ctx,
529
- qkv,
530
- cu_seqlens,
531
- max_seqlen,
532
- dropout_p,
533
- softmax_scale,
534
- causal,
535
- window_size,
536
- softcap,
537
- alibi_slopes,
538
- deterministic,
539
- return_softmax,
540
- is_grad_enabled,
541
- ):
542
- is_grad = is_grad_enabled and qkv.requires_grad
543
- if softmax_scale is None:
544
- softmax_scale = qkv.shape[-1] ** (-0.5)
545
- q, k, v = qkv[:, 0].detach(), qkv[:, 1].detach(), qkv[:, 2].detach()
546
- head_size_og = q.size(2)
547
- if head_size_og % 8 != 0:
548
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
549
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
550
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
551
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
552
- q,
553
- k,
554
- v,
555
- cu_seqlens,
556
- cu_seqlens,
557
- max_seqlen,
558
- max_seqlen,
559
- dropout_p,
560
- softmax_scale,
561
- causal=causal,
562
- window_size_left=window_size[0],
563
- window_size_right=window_size[1],
564
- softcap=softcap,
565
- alibi_slopes=alibi_slopes,
566
- return_softmax=return_softmax and dropout_p > 0,
567
- block_table=None,
568
- )
569
- if is_grad:
570
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, cu_seqlens, rng_state)
571
- ctx.dropout_p = dropout_p
572
- ctx.max_seqlen = max_seqlen
573
- ctx.softmax_scale = softmax_scale
574
- ctx.causal = causal
575
- ctx.window_size = window_size
576
- ctx.softcap = softcap
577
- ctx.alibi_slopes = alibi_slopes
578
- ctx.deterministic = deterministic
579
- out = out_padded[..., :head_size_og]
580
- return out if not return_softmax else (out, softmax_lse, S_dmask)
581
-
582
- @staticmethod
583
- def backward(ctx, dout, *args):
584
- q, k, v, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
585
- qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
586
- dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
587
- head_size_og = dout.size(2)
588
- dout_padded = dout
589
- if head_size_og % 8 != 0:
590
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
591
- _wrapped_flash_attn_varlen_backward(
592
- dout_padded,
593
- q,
594
- k,
595
- v,
596
- out,
597
- softmax_lse,
598
- dqkv[:, 0],
599
- dqkv[:, 1],
600
- dqkv[:, 2],
601
- cu_seqlens,
602
- cu_seqlens,
603
- ctx.max_seqlen,
604
- ctx.max_seqlen,
605
- ctx.dropout_p,
606
- ctx.softmax_scale,
607
- ctx.causal,
608
- ctx.window_size[0],
609
- ctx.window_size[1],
610
- ctx.softcap,
611
- ctx.alibi_slopes,
612
- ctx.deterministic,
613
- rng_state=rng_state,
614
- )
615
- dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
616
- return dqkv, None, None, None, None, None, None, None, None, None, None, None
617
-
618
-
619
- class FlashAttnKVPackedFunc(torch.autograd.Function):
620
- @staticmethod
621
- def forward(
622
- ctx,
623
- q,
624
- kv,
625
- dropout_p,
626
- softmax_scale,
627
- causal,
628
- window_size,
629
- softcap,
630
- alibi_slopes,
631
- deterministic,
632
- return_softmax,
633
- is_grad_enabled,
634
- ):
635
- is_grad = is_grad_enabled and any(
636
- x.requires_grad for x in [q, kv]
637
- )
638
- if softmax_scale is None:
639
- softmax_scale = q.shape[-1] ** (-0.5)
640
- k, v = kv[:, :, 0].detach(), kv[:, :, 1].detach()
641
- head_size_og = q.size(3)
642
- if head_size_og % 8 != 0:
643
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
644
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
645
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
646
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
647
- q,
648
- k,
649
- v,
650
- dropout_p,
651
- softmax_scale,
652
- causal=causal,
653
- window_size_left=window_size[0],
654
- window_size_right=window_size[1],
655
- softcap=softcap,
656
- alibi_slopes=alibi_slopes,
657
- return_softmax=return_softmax and dropout_p > 0,
658
- )
659
- if is_grad:
660
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
661
- ctx.dropout_p = dropout_p
662
- ctx.softmax_scale = softmax_scale
663
- ctx.causal = causal
664
- ctx.window_size = window_size
665
- ctx.softcap = softcap
666
- ctx.alibi_slopes = alibi_slopes
667
- ctx.deterministic = deterministic
668
- out = out_padded[..., :head_size_og]
669
- return out if not return_softmax else (out, softmax_lse, S_dmask)
670
-
671
- @staticmethod
672
- def backward(ctx, dout, *args):
673
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
674
- dq = torch.empty_like(q)
675
- kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
676
- dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
677
- head_size_og = dout.size(3)
678
- dout_padded = dout
679
- if head_size_og % 8 != 0:
680
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
681
- _wrapped_flash_attn_backward(
682
- dout_padded,
683
- q,
684
- k,
685
- v,
686
- out,
687
- softmax_lse,
688
- dq,
689
- dkv[:, :, 0],
690
- dkv[:, :, 1],
691
- ctx.dropout_p,
692
- ctx.softmax_scale,
693
- ctx.causal,
694
- ctx.window_size[0],
695
- ctx.window_size[1],
696
- ctx.softcap,
697
- ctx.alibi_slopes,
698
- ctx.deterministic,
699
- rng_state=rng_state,
700
- )
701
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
702
- dkv = dkv[..., : dout.shape[-1]]
703
- return dq, dkv, None, None, None, None, None, None, None, None, None
704
-
705
-
706
- class FlashAttnVarlenKVPackedFunc(torch.autograd.Function):
707
- @staticmethod
708
- def forward(
709
- ctx,
710
- q,
711
- kv,
712
- cu_seqlens_q,
713
- cu_seqlens_k,
714
- max_seqlen_q,
715
- max_seqlen_k,
716
- dropout_p,
717
- softmax_scale,
718
- causal,
719
- window_size,
720
- softcap,
721
- alibi_slopes,
722
- deterministic,
723
- return_softmax,
724
- is_grad_enabled,
725
- ):
726
- is_grad = is_grad_enabled and any(
727
- x.requires_grad for x in [q, kv]
728
- )
729
- if softmax_scale is None:
730
- softmax_scale = q.shape[-1] ** (-0.5)
731
- k, v = kv[:, 0].detach(), kv[:, 1].detach()
732
- head_size_og = q.size(2)
733
- if head_size_og % 8 != 0:
734
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
735
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
736
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
737
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
738
- q,
739
- k,
740
- v,
741
- cu_seqlens_q,
742
- cu_seqlens_k,
743
- max_seqlen_q,
744
- max_seqlen_k,
745
- dropout_p,
746
- softmax_scale,
747
- causal=causal,
748
- window_size_left=window_size[0],
749
- window_size_right=window_size[1],
750
- softcap=softcap,
751
- alibi_slopes=alibi_slopes,
752
- return_softmax=return_softmax and dropout_p > 0,
753
- block_table=None,
754
- )
755
- if is_grad:
756
- ctx.save_for_backward(
757
- q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
758
- )
759
- ctx.dropout_p = dropout_p
760
- ctx.max_seqlen_q = max_seqlen_q
761
- ctx.max_seqlen_k = max_seqlen_k
762
- ctx.softmax_scale = softmax_scale
763
- ctx.causal = causal
764
- ctx.window_size = window_size
765
- ctx.softcap = softcap
766
- ctx.alibi_slopes = alibi_slopes
767
- ctx.deterministic = deterministic
768
- out = out_padded[..., :head_size_og]
769
- return out if not return_softmax else (out, softmax_lse, S_dmask)
770
-
771
- @staticmethod
772
- def backward(ctx, dout, *args):
773
- q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
774
- dq = torch.empty_like(q)
775
- kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
776
- dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
777
- head_size_og = dout.size(2)
778
- dout_padded = dout
779
- if head_size_og % 8 != 0:
780
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
781
- _wrapped_flash_attn_varlen_backward(
782
- dout_padded,
783
- q,
784
- k,
785
- v,
786
- out,
787
- softmax_lse,
788
- dq,
789
- dkv[:, 0],
790
- dkv[:, 1],
791
- cu_seqlens_q,
792
- cu_seqlens_k,
793
- ctx.max_seqlen_q,
794
- ctx.max_seqlen_k,
795
- ctx.dropout_p,
796
- ctx.softmax_scale,
797
- ctx.causal,
798
- ctx.window_size[0],
799
- ctx.window_size[1],
800
- ctx.softcap,
801
- ctx.alibi_slopes,
802
- ctx.deterministic,
803
- rng_state=rng_state,
804
- )
805
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
806
- dkv = dkv[..., : dout.shape[-1]]
807
- return dq, dkv, None, None, None, None, None, None, None, None, None, None, None, None, None
808
-
809
-
810
- class FlashAttnFunc(torch.autograd.Function):
811
- @staticmethod
812
- def forward(
813
- ctx,
814
- q,
815
- k,
816
- v,
817
- dropout_p,
818
- softmax_scale,
819
- causal,
820
- window_size,
821
- softcap,
822
- alibi_slopes,
823
- deterministic,
824
- return_softmax,
825
- is_grad_enabled,
826
- ):
827
- is_grad = is_grad_enabled and any(
828
- x.requires_grad for x in [q, k, v]
829
- )
830
- if softmax_scale is None:
831
- softmax_scale = q.shape[-1] ** (-0.5)
832
- head_size_og = q.size(3)
833
- if head_size_og % 8 != 0:
834
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
835
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
836
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
837
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
838
- q,
839
- k,
840
- v,
841
- dropout_p,
842
- softmax_scale,
843
- causal=causal,
844
- window_size_left=window_size[0],
845
- window_size_right=window_size[1],
846
- softcap=softcap,
847
- alibi_slopes=alibi_slopes,
848
- return_softmax=return_softmax and dropout_p > 0,
849
- )
850
- if is_grad:
851
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
852
- ctx.dropout_p = dropout_p
853
- ctx.softmax_scale = softmax_scale
854
- ctx.causal = causal
855
- ctx.window_size = window_size
856
- ctx.softcap = softcap
857
- ctx.alibi_slopes = alibi_slopes
858
- ctx.deterministic = deterministic
859
- out = out_padded[..., :head_size_og]
860
- return out if not return_softmax else (out, softmax_lse, S_dmask)
861
-
862
- @staticmethod
863
- def backward(ctx, dout, *args):
864
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
865
- dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
866
- head_size_og = dout.size(3)
867
- dout_padded = dout
868
- if head_size_og % 8 != 0:
869
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
870
- _wrapped_flash_attn_backward(
871
- dout_padded,
872
- q,
873
- k,
874
- v,
875
- out,
876
- softmax_lse,
877
- dq,
878
- dk,
879
- dv,
880
- ctx.dropout_p,
881
- ctx.softmax_scale,
882
- ctx.causal,
883
- ctx.window_size[0],
884
- ctx.window_size[1],
885
- ctx.softcap,
886
- ctx.alibi_slopes,
887
- ctx.deterministic,
888
- rng_state=rng_state,
889
- )
890
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
891
- dk = dk[..., : dout.shape[-1]]
892
- dv = dv[..., : dout.shape[-1]]
893
- return dq, dk, dv, None, None, None, None, None, None, None, None, None
894
-
895
-
896
- class FlashAttnVarlenFunc(torch.autograd.Function):
897
- @staticmethod
898
- def forward(
899
- ctx,
900
- q,
901
- k,
902
- v,
903
- cu_seqlens_q,
904
- cu_seqlens_k,
905
- max_seqlen_q,
906
- max_seqlen_k,
907
- dropout_p,
908
- softmax_scale,
909
- causal,
910
- window_size,
911
- softcap,
912
- alibi_slopes,
913
- deterministic,
914
- return_softmax,
915
- block_table,
916
- is_grad_enabled,
917
- ):
918
- is_grad = is_grad_enabled and any(
919
- x.requires_grad for x in [q, k, v]
920
- )
921
- if softmax_scale is None:
922
- softmax_scale = q.shape[-1] ** (-0.5)
923
- head_size_og = q.size(2)
924
- if head_size_og % 8 != 0:
925
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
926
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
927
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
928
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
929
- q,
930
- k,
931
- v,
932
- cu_seqlens_q,
933
- cu_seqlens_k,
934
- max_seqlen_q,
935
- max_seqlen_k,
936
- dropout_p,
937
- softmax_scale,
938
- causal=causal,
939
- window_size_left=window_size[0],
940
- window_size_right=window_size[1],
941
- softcap=softcap,
942
- alibi_slopes=alibi_slopes,
943
- return_softmax=return_softmax and dropout_p > 0,
944
- block_table=block_table,
945
- )
946
- if is_grad:
947
- ctx.save_for_backward(
948
- q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
949
- )
950
- ctx.dropout_p = dropout_p
951
- ctx.max_seqlen_q = max_seqlen_q
952
- ctx.max_seqlen_k = max_seqlen_k
953
- ctx.softmax_scale = softmax_scale
954
- ctx.causal = causal
955
- ctx.window_size = window_size
956
- ctx.softcap = softcap
957
- ctx.alibi_slopes = alibi_slopes
958
- ctx.deterministic = deterministic
959
-
960
- out = out_padded[..., :head_size_og]
961
- return out if not return_softmax else (out, softmax_lse, S_dmask)
962
-
963
- @staticmethod
964
- def backward(ctx, dout, *args):
965
- q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
966
- dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
967
- head_size_og = dout.size(2)
968
- dout_padded = dout
969
- if head_size_og % 8 != 0:
970
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
971
- _wrapped_flash_attn_varlen_backward(
972
- dout_padded,
973
- q,
974
- k,
975
- v,
976
- out,
977
- softmax_lse,
978
- dq,
979
- dk,
980
- dv,
981
- cu_seqlens_q,
982
- cu_seqlens_k,
983
- ctx.max_seqlen_q,
984
- ctx.max_seqlen_k,
985
- ctx.dropout_p,
986
- ctx.softmax_scale,
987
- ctx.causal,
988
- ctx.window_size[0],
989
- ctx.window_size[1],
990
- ctx.softcap,
991
- ctx.alibi_slopes,
992
- ctx.deterministic,
993
- rng_state=rng_state,
994
- )
995
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
996
- dk = dk[..., : dout.shape[-1]]
997
- dv = dv[..., : dout.shape[-1]]
998
- return dq, dk, dv, None, None, None, None, None, None, None, None, None, None, None, None, None, None
999
-
1000
-
1001
- def flash_attn_qkvpacked_func(
1002
- qkv,
1003
- dropout_p=0.0,
1004
- softmax_scale=None,
1005
- causal=False,
1006
- window_size=(-1, -1), # -1 means infinite context window
1007
- softcap=0.0, # <=0.0 means deactivated
1008
- alibi_slopes=None,
1009
- deterministic=False,
1010
- return_attn_probs=False,
1011
- ):
1012
- """dropout_p should be set to 0.0 during evaluation
1013
- If Q, K, V are already stacked into 1 tensor, this function will be faster than
1014
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
1015
- of the gradients of Q, K, V.
1016
- For multi-query and grouped-query attention (MQA/GQA), please see
1017
- flash_attn_kvpacked_func and flash_attn_func.
1018
-
1019
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1020
- will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
1021
-
1022
- Arguments:
1023
- qkv: (batch_size, seqlen, 3, nheads, headdim)
1024
- dropout_p: float. Dropout probability.
1025
- softmax_scale: float. The scaling of QK^T before applying softmax.
1026
- Default to 1 / sqrt(headdim).
1027
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1028
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1029
- softcap: float. Anything > 0 activates softcapping attention.
1030
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|) is added to
1031
- the attention score of query i and key j.
1032
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1033
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1034
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1035
- testing only. The returned probabilities are not guaranteed to be correct
1036
- (they might not have the right scaling).
1037
- Return:
1038
- out: (batch_size, seqlen, nheads, headdim).
1039
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
1040
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1041
- normalization factor).
1042
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1043
- The output of softmax (possibly with different scaling). It also encodes the dropout
1044
- pattern (negative means that location was dropped, nonnegative means it was kept).
1045
- """
1046
- return FlashAttnQKVPackedFunc.apply(
1047
- qkv,
1048
- dropout_p,
1049
- softmax_scale,
1050
- causal,
1051
- window_size,
1052
- softcap,
1053
- alibi_slopes,
1054
- deterministic,
1055
- return_attn_probs,
1056
- torch.is_grad_enabled(),
1057
- )
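The docstring above describes the packed-QKV entry point; below is a small hedged sketch of a call against that signature. The top-level import path, shapes, and dtype are assumptions, and it needs a CUDA device with the compiled kernel installed.

import torch
from flash_attn import flash_attn_qkvpacked_func  # assumed import path

batch, seqlen, nheads, headdim = 2, 128, 8, 64
qkv = torch.randn(batch, seqlen, 3, nheads, headdim,
                  device="cuda", dtype=torch.float16, requires_grad=True)

# Causal self-attention over the packed tensor; keep dropout_p at 0.0 outside training.
out = flash_attn_qkvpacked_func(qkv, dropout_p=0.0, causal=True)
out.sum().backward()   # gradients flow back into the single packed qkv tensor
print(out.shape)       # (batch, seqlen, nheads, headdim)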
1058
-
1059
-
1060
- def flash_attn_kvpacked_func(
1061
- q,
1062
- kv,
1063
- dropout_p=0.0,
1064
- softmax_scale=None,
1065
- causal=False,
1066
- window_size=(-1, -1), # -1 means infinite context window
1067
- softcap=0.0, # 0.0 means deactivated
1068
- alibi_slopes=None,
1069
- deterministic=False,
1070
- return_attn_probs=False,
1071
- ):
1072
- """dropout_p should be set to 0.0 during evaluation
1073
- If K, V are already stacked into 1 tensor, this function will be faster than
1074
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
1075
- of the gradients of K, V.
1076
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1077
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1078
- For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
1079
- 0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
1080
-
1081
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1082
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1083
- 1 1 1 1 0
1084
- 1 1 1 1 1
1085
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1086
- 0 0
1087
- 0 0
1088
- 0 0
1089
- 1 0
1090
- 1 1
1091
- If the row of the mask is all zero, the output will be zero.
1092
-
1093
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1094
- will only attend to keys between
1095
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1096
-
1097
- Arguments:
1098
- q: (batch_size, seqlen, nheads, headdim)
1099
- kv: (batch_size, seqlen, 2, nheads_k, headdim)
1100
- dropout_p: float. Dropout probability.
1101
- softmax_scale: float. The scaling of QK^T before applying softmax.
1102
- Default to 1 / sqrt(headdim).
1103
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1104
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1105
- softcap: float. Anything > 0 activates softcapping attention.
1106
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1107
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1108
- is added to the attention score of query i and key j.
1109
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1110
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1111
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1112
- testing only. The returned probabilities are not guaranteed to be correct
1113
- (they might not have the right scaling).
1114
- Return:
1115
- out: (batch_size, seqlen, nheads, headdim).
1116
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
1117
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1118
- normalization factor).
1119
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1120
- The output of softmax (possibly with different scaling). It also encodes the dropout
1121
- pattern (negative means that location was dropped, nonnegative means it was kept).
1122
- """
1123
- return FlashAttnKVPackedFunc.apply(
1124
- q,
1125
- kv,
1126
- dropout_p,
1127
- softmax_scale,
1128
- causal,
1129
- window_size,
1130
- softcap,
1131
- alibi_slopes,
1132
- deterministic,
1133
- return_attn_probs,
1134
- torch.is_grad_enabled(),
1135
- )
1136
-
1137
-
1138
- def flash_attn_func(
1139
- q,
1140
- k,
1141
- v,
1142
- dropout_p=0.0,
1143
- softmax_scale=None,
1144
- causal=False,
1145
- window_size=(-1, -1), # -1 means infinite context window
1146
- softcap=0.0, # 0.0 means deactivated
1147
- alibi_slopes=None,
1148
- deterministic=False,
1149
- return_attn_probs=False,
1150
- ):
1151
- """dropout_p should be set to 0.0 during evaluation
1152
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1153
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1154
- For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
1155
- 0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
1156
-
1157
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1158
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1159
- 1 1 1 1 0
1160
- 1 1 1 1 1
1161
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1162
- 0 0
1163
- 0 0
1164
- 0 0
1165
- 1 0
1166
- 1 1
1167
- If the row of the mask is all zero, the output will be zero.
1168
-
1169
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1170
- will only attend to keys between
1171
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1172
-
1173
- Arguments:
1174
- q: (batch_size, seqlen, nheads, headdim)
1175
- k: (batch_size, seqlen, nheads_k, headdim)
1176
- v: (batch_size, seqlen, nheads_k, headdim)
1177
- dropout_p: float. Dropout probability.
1178
- softmax_scale: float. The scaling of QK^T before applying softmax.
1179
- Default to 1 / sqrt(headdim).
1180
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1181
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1182
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1183
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1184
- is added to the attention score of query i and key j.
1185
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1186
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1187
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1188
- testing only. The returned probabilities are not guaranteed to be correct
1189
- (they might not have the right scaling).
1190
- Return:
1191
- out: (batch_size, seqlen, nheads, headdim).
1192
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
1193
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1194
- normalization factor).
1195
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1196
- The output of softmax (possibly with different scaling). It also encodes the dropout
1197
- pattern (negative means that location was dropped, nonnegative means it was kept).
1198
- """
1199
- return FlashAttnFunc.apply(
1200
- q,
1201
- k,
1202
- v,
1203
- dropout_p,
1204
- softmax_scale,
1205
- causal,
1206
- window_size,
1207
- softcap,
1208
- alibi_slopes,
1209
- deterministic,
1210
- return_attn_probs,
1211
- torch.is_grad_enabled(),
1212
- )
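To ground the causal-mask and MQA/GQA discussion in the docstring above, here is a short sketch against the flash_attn_func signature shown in this diff; the import path, shapes, and dtype are illustrative, and a CUDA device plus the compiled extension are required.

import torch
from flash_attn import flash_attn_func  # assumed import path

batch, seqlen, nheads, nheads_k, headdim = 2, 256, 8, 2, 64   # 8 query heads share 2 KV heads (GQA)
q = torch.randn(batch, seqlen, nheads, headdim, device="cuda", dtype=torch.float16)
k = torch.randn(batch, seqlen, nheads_k, headdim, device="cuda", dtype=torch.float16)
v = torch.randn(batch, seqlen, nheads_k, headdim, device="cuda", dtype=torch.float16)

# Causal attention restricted to a 64-token look-back window (sliding window local attention).
out = flash_attn_func(q, k, v, causal=True, window_size=(64, 0))
print(out.shape)   # (batch, seqlen, nheads, headdim)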
1213
-
1214
-
1215
- def flash_attn_varlen_qkvpacked_func(
1216
- qkv,
1217
- cu_seqlens,
1218
- max_seqlen,
1219
- dropout_p=0.0,
1220
- softmax_scale=None,
1221
- causal=False,
1222
- window_size=(-1, -1), # -1 means infinite context window
1223
- softcap=0.0, # 0.0 means deactivated
1224
- alibi_slopes=None,
1225
- deterministic=False,
1226
- return_attn_probs=False,
1227
- ):
1228
- """dropout_p should be set to 0.0 during evaluation
1229
- If Q, K, V are already stacked into 1 tensor, this function will be faster than
1230
- calling flash_attn_varlen_func on Q, K, V since the backward pass avoids explicit concatenation
1231
- of the gradients of Q, K, V.
1232
- For multi-query and grouped-query attention (MQA/GQA), please see
1233
- flash_attn_varlen_kvpacked_func and flash_attn_varlen_func.
1234
-
1235
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1236
- will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
1237
-
1238
- Arguments:
1239
- qkv: (total, 3, nheads, headdim), where total = total number of tokens in the batch.
1240
- cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1241
- of the sequences in the batch, used to index into qkv.
1242
- max_seqlen: int. Maximum sequence length in the batch.
1243
- dropout_p: float. Dropout probability.
1244
- softmax_scale: float. The scaling of QK^T before applying softmax.
1245
- Default to 1 / sqrt(headdim).
1246
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1247
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1248
- softcap: float. Anything > 0 activates softcapping attention.
1249
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|)
1250
- is added to the attention score of query i and key j.
1251
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1252
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1253
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1254
- testing only. The returned probabilities are not guaranteed to be correct
1255
- (they might not have the right scaling).
1256
- Return:
1257
- out: (total, nheads, headdim).
1258
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
1259
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1260
- normalization factor).
1261
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1262
- The output of softmax (possibly with different scaling). It also encodes the dropout
1263
- pattern (negative means that location was dropped, nonnegative means it was kept).
1264
- """
1265
- return FlashAttnVarlenQKVPackedFunc.apply(
1266
- qkv,
1267
- cu_seqlens,
1268
- max_seqlen,
1269
- dropout_p,
1270
- softmax_scale,
1271
- causal,
1272
- window_size,
1273
- softcap,
1274
- alibi_slopes,
1275
- deterministic,
1276
- return_attn_probs,
1277
- torch.is_grad_enabled(),
1278
- )
1279
-
1280
-
1281
- def flash_attn_varlen_kvpacked_func(
1282
- q,
1283
- kv,
1284
- cu_seqlens_q,
1285
- cu_seqlens_k,
1286
- max_seqlen_q,
1287
- max_seqlen_k,
1288
- dropout_p=0.0,
1289
- softmax_scale=None,
1290
- causal=False,
1291
- window_size=(-1, -1), # -1 means infinite context window
1292
- softcap=0.0, # 0.0 means deactivated
1293
- alibi_slopes=None,
1294
- deterministic=False,
1295
- return_attn_probs=False,
1296
- ):
1297
- """dropout_p should be set to 0.0 during evaluation
1298
- If K, V are already stacked into 1 tensor, this function will be faster than
1299
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
1300
- of the gradients of K, V.
1301
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1302
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1303
- For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
1304
- 0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
1305
-
1306
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1307
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1308
- 1 1 1 1 0
1309
- 1 1 1 1 1
1310
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1311
- 0 0
1312
- 0 0
1313
- 0 0
1314
- 1 0
1315
- 1 1
1316
- If the row of the mask is all zero, the output will be zero.
1317
-
1318
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1319
- will only attend to keys between
1320
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1321
-
1322
- Arguments:
1323
- q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
1324
- kv: (total_k, 2, nheads_k, headdim), where total_k = total number of key tokens in the batch.
1325
- cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1326
- of the sequences in the batch, used to index into q.
1327
- cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1328
- of the sequences in the batch, used to index into kv.
1329
- max_seqlen_q: int. Maximum query sequence length in the batch.
1330
- max_seqlen_k: int. Maximum key sequence length in the batch.
1331
- dropout_p: float. Dropout probability.
1332
- softmax_scale: float. The scaling of QK^T before applying softmax.
1333
- Default to 1 / sqrt(headdim).
1334
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1335
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1336
- softcap: float. Anything > 0 activates softcapping attention.
1337
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1338
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1339
- is added to the attention score of query i and key j.
1340
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1341
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1342
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1343
- testing only. The returned probabilities are not guaranteed to be correct
1344
- (they might not have the right scaling).
1345
- Return:
1346
- out: (total, nheads, headdim).
1347
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
1348
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1349
- normalization factor).
1350
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1351
- The output of softmax (possibly with different scaling). It also encodes the dropout
1352
- pattern (negative means that location was dropped, nonnegative means it was kept).
1353
- """
1354
- return FlashAttnVarlenKVPackedFunc.apply(
1355
- q,
1356
- kv,
1357
- cu_seqlens_q,
1358
- cu_seqlens_k,
1359
- max_seqlen_q,
1360
- max_seqlen_k,
1361
- dropout_p,
1362
- softmax_scale,
1363
- causal,
1364
- window_size,
1365
- softcap,
1366
- alibi_slopes,
1367
- deterministic,
1368
- return_attn_probs,
1369
- torch.is_grad_enabled(),
1370
- )
1371
-
1372
-
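# A minimal GQA sketch for flash_attn_varlen_kvpacked_func defined above: Q has 8 heads,
# the packed KV has 2 heads, and 8 is divisible by 2 as the docstring requires.
# Assumes `flash_attn` is importable and a CUDA device; sizes are made up for illustration.
import torch
from flash_attn import flash_attn_varlen_kvpacked_func

headdim = 64
lens = [4, 6]
q = torch.randn(sum(lens), 8, headdim, device="cuda", dtype=torch.bfloat16)
kv = torch.randn(sum(lens), 2, 2, headdim, device="cuda", dtype=torch.bfloat16)
cu = torch.tensor([0, 4, 10], device="cuda", dtype=torch.int32)
out = flash_attn_varlen_kvpacked_func(q, kv, cu, cu, max(lens), max(lens), causal=True)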
1373
- def flash_attn_varlen_func(
1374
- q,
1375
- k,
1376
- v,
1377
- cu_seqlens_q,
1378
- cu_seqlens_k,
1379
- max_seqlen_q,
1380
- max_seqlen_k,
1381
- dropout_p=0.0,
1382
- softmax_scale=None,
1383
- causal=False,
1384
- window_size=(-1, -1), # -1 means infinite context window
1385
- softcap=0.0, # 0.0 means deactivated
1386
- alibi_slopes=None,
1387
- deterministic=False,
1388
- return_attn_probs=False,
1389
- block_table=None,
1390
- ):
1391
- """dropout_p should be set to 0.0 during evaluation
1392
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in K, V with fewer heads
1393
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1394
- For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
1395
- 0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
1396
-
1397
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1398
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1399
- 1 1 1 1 0
1400
- 1 1 1 1 1
1401
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1402
- 0 0
1403
- 0 0
1404
- 0 0
1405
- 1 0
1406
- 1 1
1407
- If the row of the mask is all zero, the output will be zero.
1408
-
1409
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1410
- will only attend to keys between
1411
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1412
-
1413
- Arguments:
1414
- q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
1415
- k: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
1416
- v: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
1417
- cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1418
- of the sequences in the batch, used to index into q.
1419
- cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1420
- of the sequences in the batch, used to index into kv.
1421
- max_seqlen_q: int. Maximum query sequence length in the batch.
1422
- max_seqlen_k: int. Maximum key sequence length in the batch.
1423
- dropout_p: float. Dropout probability.
1424
- softmax_scale: float. The scaling of QK^T before applying softmax.
1425
- Default to 1 / sqrt(headdim).
1426
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1427
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1428
- softcap: float. Anything > 0 activates softcapping attention.
1429
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1430
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1431
- is added to the attention score of query i and key j.
1432
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1433
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1434
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1435
- testing only. The returned probabilities are not guaranteed to be correct
1436
- (they might not have the right scaling).
1437
- Return:
1438
- out: (total, nheads, headdim).
1439
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
1440
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1441
- normalization factor).
1442
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1443
- The output of softmax (possibly with different scaling). It also encodes the dropout
1444
- pattern (negative means that location was dropped, nonnegative means it was kept).
1445
- """
1446
- return FlashAttnVarlenFunc.apply(
1447
- q,
1448
- k,
1449
- v,
1450
- cu_seqlens_q,
1451
- cu_seqlens_k,
1452
- max_seqlen_q,
1453
- max_seqlen_k,
1454
- dropout_p,
1455
- softmax_scale,
1456
- causal,
1457
- window_size,
1458
- softcap,
1459
- alibi_slopes,
1460
- deterministic,
1461
- return_attn_probs,
1462
- block_table,
1463
- torch.is_grad_enabled(),
1464
- )
1465
-
1466
-
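# A minimal sketch for flash_attn_varlen_func defined above, including how cu_seqlens
# is usually derived from per-sequence lengths (cumulative sum with a leading zero,
# int32). Assumes `flash_attn` is importable and a CUDA device; sizes are illustrative.
import torch
import torch.nn.functional as F
from flash_attn import flash_attn_varlen_func

def lens_to_cu_seqlens(lens):
    # e.g. [3, 5] -> tensor([0, 3, 8], dtype=torch.int32)
    t = torch.tensor(lens, dtype=torch.int32, device="cuda")
    return F.pad(torch.cumsum(t, 0, dtype=torch.int32), (1, 0))

lens = [3, 5]
nheads, nheads_k, headdim = 8, 2, 64
q = torch.randn(sum(lens), nheads, headdim, device="cuda", dtype=torch.float16)
k = torch.randn(sum(lens), nheads_k, headdim, device="cuda", dtype=torch.float16)
v = torch.randn(sum(lens), nheads_k, headdim, device="cuda", dtype=torch.float16)
cu = lens_to_cu_seqlens(lens)
out = flash_attn_varlen_func(q, k, v, cu, cu, max(lens), max(lens), causal=True)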
1467
- def flash_attn_with_kvcache(
1468
- q,
1469
- k_cache,
1470
- v_cache,
1471
- k=None,
1472
- v=None,
1473
- rotary_cos=None,
1474
- rotary_sin=None,
1475
- cache_seqlens: Optional[Union[(int, torch.Tensor)]] = None,
1476
- cache_batch_idx: Optional[torch.Tensor] = None,
1477
- cache_leftpad: Optional[torch.Tensor] = None,
1478
- block_table: Optional[torch.Tensor] = None,
1479
- softmax_scale=None,
1480
- causal=False,
1481
- window_size=(-1, -1), # -1 means infinite context window
1482
- softcap=0.0, # 0.0 means deactivated
1483
- rotary_interleaved=True,
1484
- alibi_slopes=None,
1485
- num_splits=0,
1486
- return_softmax_lse=False,
1487
- ):
1488
- """
1489
- If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
1490
- k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
1491
- the previous step, and update them with the new keys/values from the current step, and do
1492
- attention with the updated cache, all in 1 kernel.
1493
-
1494
- If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
1495
- For example, the KV cache could be pre-allocated with the max sequence length, and you can use
1496
- cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.
1497
-
1498
- Also apply rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be
1499
- rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
1500
- If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
1501
- and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
1502
- If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
1503
- indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).
1504
-
1505
- See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.
1506
-
1507
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1508
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1509
- For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
1510
- 0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
1511
-
1512
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1513
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1514
- 1 1 1 1 0
1515
- 1 1 1 1 1
1516
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1517
- 0 0
1518
- 0 0
1519
- 0 0
1520
- 1 0
1521
- 1 1
1522
- If the row of the mask is all zero, the output will be zero.
1523
-
1524
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1525
- will only attend to keys between
1526
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1527
-
1528
- Note: Does not support backward pass.
1529
-
1530
- Arguments:
1531
- q: (batch_size, seqlen, nheads, headdim)
1532
- k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
1533
- or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
1534
- page_block_size must be a multiple of 256.
1535
- v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
1536
- or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
1537
- k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
1538
- k with k_cache, starting at the indices specified by cache_seqlens.
1539
- v [optional]: (batch_size, seqlen_new, nheads_k, headdim). Similar to k.
1540
- rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
1541
- to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
1542
- rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
1543
- cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
1544
- KV cache.
1545
- cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
1546
- If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1].
1547
- If the indices are not distinct, and k and v are provided, the values updated in the cache
1548
- might come from any of the duplicate indices.
1549
- cache_leftpad: (batch_size,), dtype torch.int32. The index that the KV cache starts. If None, assume 0.
1550
- block_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
1551
- softmax_scale: float. The scaling of QK^T before applying softmax.
1552
- Default to 1 / sqrt(headdim).
1553
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1554
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1555
- softcap: float. Anything > 0 activates softcapping attention.
1556
- rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
1557
- If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
1558
- rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
1559
- (i.e. GPT-NeoX style).
1560
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1561
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1562
- is added to the attention score of query i and key j.
1563
- num_splits: int. If > 1, split the key/value into this many chunks along the sequence.
1564
- If num_splits == 1, we don't split the key/value. If num_splits == 0, we use a heuristic
1565
- to automatically determine the number of splits.
1566
- Don't change this unless you know what you are doing.
1567
- return_softmax_lse: bool. Whether to return the logsumexp of the attention scores.
1568
-
1569
- Return:
1570
- out: (batch_size, seqlen, nheads, headdim).
1571
- softmax_lse [optional, if return_softmax_lse=True]: (batch_size, nheads, seqlen). The
1572
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1573
- normalization factor).
1574
- """
1575
- assert k_cache.stride(-1) == 1, "k_cache must have contiguous last dimension"
1576
- assert v_cache.stride(-1) == 1, "v_cache must have contiguous last dimension"
1577
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
1578
- if softmax_scale is None:
1579
- softmax_scale = q.shape[-1] ** (-0.5)
1580
- if cache_seqlens is not None and isinstance(cache_seqlens, int):
1581
- cache_seqlens = torch.full(
1582
- (k_cache.shape[0],), cache_seqlens, dtype=torch.int32, device=k_cache.device
1583
- )
1584
- cache_seqlens = maybe_contiguous(cache_seqlens)
1585
- cache_batch_idx = maybe_contiguous(cache_batch_idx)
1586
- block_table = maybe_contiguous(block_table)
1587
- out, softmax_lse = flash_attn_gpu.fwd_kvcache(
1588
- q,
1589
- k_cache,
1590
- v_cache,
1591
- k,
1592
- v,
1593
- cache_seqlens,
1594
- rotary_cos,
1595
- rotary_sin,
1596
- cache_batch_idx,
1597
- cache_leftpad,
1598
- block_table,
1599
- alibi_slopes,
1600
- None,
1601
- softmax_scale,
1602
- causal,
1603
- window_size[0],
1604
- window_size[1],
1605
- softcap,
1606
- rotary_interleaved,
1607
- num_splits,
1608
- )
1609
- return (out, softmax_lse) if return_softmax_lse else out
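A minimal decoding-step sketch for flash_attn_with_kvcache, assuming the package is importable as `flash_attn` and a CUDA device is available (sizes below are made up for illustration): a fixed-size cache is pre-allocated and cache_seqlens tracks how many tokens of each sequence are already cached.

import torch
from flash_attn import flash_attn_with_kvcache

batch, nheads, nheads_k, headdim, max_cache = 2, 8, 2, 64, 256
k_cache = torch.zeros(batch, max_cache, nheads_k, headdim, device="cuda", dtype=torch.float16)
v_cache = torch.zeros_like(k_cache)
cache_seqlens = torch.tensor([10, 17], device="cuda", dtype=torch.int32)  # tokens already cached

q = torch.randn(batch, 1, nheads, headdim, device="cuda", dtype=torch.float16)       # one new token per sequence
k_new = torch.randn(batch, 1, nheads_k, headdim, device="cuda", dtype=torch.float16)
v_new = torch.randn(batch, 1, nheads_k, headdim, device="cuda", dtype=torch.float16)

# Appends k_new / v_new into the caches at cache_seqlens and attends over the updated cache.
out = flash_attn_with_kvcache(
    q, k_cache, v_cache, k=k_new, v=v_new, cache_seqlens=cache_seqlens, causal=True
)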
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/layers/__init__.py DELETED
File without changes
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/layers/patch_embed.py DELETED
@@ -1,67 +0,0 @@
1
- # We use the same API as https://github.com/rwightman/pytorch-image-models/blob/v0.6.11/timm/models/layers/patch_embed.py
2
- # But we use nn.Linear instead of Conv2d and it's about 8x faster.
3
-
4
- from functools import partial
5
-
6
- import torch.nn as nn
7
- from einops import rearrange
8
- from torch import _assert
9
- from torch.nn.modules.utils import _pair
10
-
11
- try:
12
- from flash_attn.ops.fused_dense import FusedDense
13
- except ImportError:
14
- FusedDense = None
15
-
16
-
17
- class PatchEmbed(nn.Module):
18
- """2D Image to Patch Embedding"""
19
-
20
- def __init__(
21
- self,
22
- img_size=224,
23
- patch_size=16,
24
- in_chans=3,
25
- embed_dim=768,
26
- norm_layer=None,
27
- flatten=True,
28
- bias=True,
29
- fused_bias_fc=False,
30
- ):
31
- super().__init__()
32
- img_size = _pair(img_size)
33
- patch_size = _pair(patch_size)
34
- self.img_size = img_size
35
- self.patch_size = patch_size
36
- self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
37
- self.num_patches = self.grid_size[0] * self.grid_size[1]
38
- self.flatten = flatten
39
- if fused_bias_fc and FusedDense is None:
40
- raise ImportError("fused_dense is not installed")
41
-
42
- linear_cls = nn.Linear if not fused_bias_fc or not bias else FusedDense
43
- self.proj = linear_cls(in_chans * patch_size[0] * patch_size[1], embed_dim, bias=bias)
44
- self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
45
-
46
- def forward(self, x):
47
- _, _, H, W = x.shape
48
- _assert(
49
- H == self.img_size[0],
50
- f"Input image height ({H}) doesn't match model ({self.img_size[0]}).",
51
- )
52
- _assert(
53
- W == self.img_size[1],
54
- f"Input image width ({W}) doesn't match model ({self.img_size[1]}).",
55
- )
56
- x = self.proj(
57
- rearrange(
58
- x,
59
- "b c (h p1) (w p2) -> b h w (c p1 p2)",
60
- p1=self.patch_size[0],
61
- p2=self.patch_size[1],
62
- )
63
- )
64
- if self.flatten:
65
- x = rearrange(x, "b h w c -> b (h w) c")
66
- x = self.norm(x)
67
- return x
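A minimal usage sketch for the PatchEmbed module deleted above, assuming einops is installed and the package layout of this build (`flash_attn.layers.patch_embed`); with the defaults, a 224x224 RGB image becomes 14 * 14 = 196 patch tokens of width 768.

import torch
from flash_attn.layers.patch_embed import PatchEmbed

patch_embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
x = torch.randn(2, 3, 224, 224)
tokens = patch_embed(x)   # (2, 196, 768) because flatten=True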
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/layers/rotary.py DELETED
@@ -1,483 +0,0 @@
1
- # Copyright (c) 2025, Tri Dao
2
-
3
- import math
4
- from functools import partial
5
- from typing import Optional, Tuple, Union
6
-
7
- import torch
8
- from torch import Tensor
9
-
10
- from einops import rearrange, repeat
11
- # from flash_attn.ops.triton.rotary import apply_rotary
12
- from ..ops.triton.rotary import apply_rotary
13
-
14
-
15
- def rotate_half(x, interleaved=False):
16
- if not interleaved:
17
- x1, x2 = x.chunk(2, dim=-1)
18
- return torch.cat((-x2, x1), dim=-1)
19
- else:
20
- x1, x2 = x[..., ::2], x[..., 1::2]
21
- return rearrange(torch.stack((-x2, x1), dim=-1), "... d two -> ... (d two)", two=2)
22
-
23
-
24
- def apply_rotary_emb_torch(x, cos, sin, interleaved=False):
25
- """
26
- x: (batch_size, seqlen, nheads, headdim)
27
- cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2)
28
- """
29
- ro_dim = cos.shape[-1] * 2
30
- assert ro_dim <= x.shape[-1]
31
- cos = repeat(cos, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)")
32
- sin = repeat(sin, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)")
33
- return torch.cat(
34
- [x[..., :ro_dim] * cos + rotate_half(x[..., :ro_dim], interleaved) * sin, x[..., ro_dim:]],
35
- dim=-1,
36
- )
37
-
38
-
39
- class ApplyRotaryEmb(torch.autograd.Function):
40
- @staticmethod
41
- def forward(
42
- ctx,
43
- x,
44
- cos,
45
- sin,
46
- interleaved=False,
47
- inplace=False,
48
- seqlen_offsets: Union[int, Tensor] = 0,
49
- cu_seqlens: Optional[Tensor] = None,
50
- max_seqlen: Optional[int] = None,
51
- ):
52
- out = apply_rotary(
53
- x,
54
- cos,
55
- sin,
56
- seqlen_offsets=seqlen_offsets,
57
- cu_seqlens=cu_seqlens,
58
- max_seqlen=max_seqlen,
59
- interleaved=interleaved,
60
- inplace=inplace,
61
- )
62
- if isinstance(seqlen_offsets, int):
63
- ctx.save_for_backward(cos, sin, cu_seqlens) # Can't save int with save_for_backward
64
- ctx.seqlen_offsets = seqlen_offsets
65
- else:
66
- ctx.save_for_backward(cos, sin, cu_seqlens, seqlen_offsets)
67
- ctx.seqlen_offsets = None
68
- ctx.interleaved = interleaved
69
- ctx.inplace = inplace
70
- ctx.max_seqlen = max_seqlen
71
- return out if not inplace else x
72
-
73
- @staticmethod
74
- def backward(ctx, do):
75
- seqlen_offsets = ctx.seqlen_offsets
76
- if seqlen_offsets is None:
77
- cos, sin, cu_seqlens, seqlen_offsets = ctx.saved_tensors
78
- else:
79
- cos, sin, cu_seqlens = ctx.saved_tensors
80
- dx = apply_rotary(
81
- do,
82
- cos,
83
- sin,
84
- seqlen_offsets=seqlen_offsets,
85
- cu_seqlens=cu_seqlens,
86
- max_seqlen=ctx.max_seqlen,
87
- interleaved=ctx.interleaved,
88
- inplace=ctx.inplace,
89
- conjugate=True,
90
- )
91
- return dx, None, None, None, None, None, None, None
92
-
93
-
94
- def apply_rotary_emb(
95
- x,
96
- cos,
97
- sin,
98
- interleaved=False,
99
- inplace=False,
100
- seqlen_offsets: Union[int, Tensor] = 0,
101
- cu_seqlens: Optional[Tensor] = None,
102
- max_seqlen: Optional[int] = None,
103
- ):
104
- """
105
- Arguments:
106
- x: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None
107
- else (total_seqlen, nheads, headdim)
108
- cos, sin: (seqlen_rotary, rotary_dim / 2)
109
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead
110
- of 1st half and 2nd half (GPT-NeoX style).
111
- inplace: if True, apply rotary embedding in-place.
112
- seqlen_offsets: (batch_size,) or int. Each sequence in x is shifted by this amount.
113
- Most commonly used in inference when we have KV cache.
114
- cu_seqlens: (batch + 1,) or None
115
- max_seqlen: int
116
- Return:
117
- out: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None
118
- else (total_seqlen, nheads, headdim)
119
- rotary_dim must be <= headdim
120
- Apply rotary embedding to the first rotary_dim of x.
121
- """
122
- return ApplyRotaryEmb.apply(
123
- x, cos, sin, interleaved, inplace, seqlen_offsets, cu_seqlens, max_seqlen
124
- )
125
-
126
-
127
- # For backward compatibility
128
- apply_rotary_emb_func = apply_rotary_emb
129
-
130
-
131
- def _apply_rotary_emb_qkv(
132
- qkv,
133
- cos,
134
- sin,
135
- cos_k=None,
136
- sin_k=None,
137
- interleaved=False,
138
- inplace=False,
139
- conjugate=False,
140
- seqlen_offsets: Union[int, Tensor] = 0,
141
- num_heads_q: Optional[int] = None,
142
- ):
143
- apply_rotary_fn = partial(
144
- apply_rotary,
145
- interleaved=interleaved,
146
- inplace=inplace,
147
- conjugate=conjugate,
148
- seqlen_offsets=seqlen_offsets
149
- )
150
- if cos_k is None and sin_k is None and qkv.is_contiguous():
151
- # Call 1 kernel instead of 2 kernels
152
- # We need qkv to be contiguous so that when we reshape to combine (3, nheads)
153
- # dimensions, we get the same tensor
154
- if qkv.dim() == 5:
155
- batch, seqlen, three, nheads, headdim = qkv.shape
156
- assert three == 3
157
- # qk = rearrange(qkv[:, :, :2], "b s t h d -> b s (t h) d")
158
- qk = qkv[:, :, :2].reshape(batch, seqlen, -1, headdim)
159
- qk = apply_rotary_fn(qk, cos, sin)
160
- else:
161
- assert qkv.dim() == 4
162
- assert num_heads_q is not None
163
- num_heads_k = (qkv.shape[2] - num_heads_q) // 2
164
- assert qkv.shape[2] == num_heads_q + 2 * num_heads_k
165
- qk = qkv[:, :, :num_heads_q + num_heads_k]
166
- qk = apply_rotary_fn(qk, cos, sin)
167
- if not inplace:
168
- if qkv.dim() == 5:
169
- qkv = torch.cat([rearrange(qk, "b s (t h) d -> b s t h d", t=2), qkv[:, :, 2:]], dim=2)
170
- else:
171
- qkv = torch.cat([qk, qkv[:, :, num_heads_q + num_heads_k :]], dim=2)
172
- else:
173
- cos_k = cos if cos_k is None else cos_k
174
- sin_k = sin if sin_k is None else sin_k
175
- if qkv.dim() == 5:
176
- batch, seqlen, three, nheads, headdim = qkv.shape
177
- assert three == 3
178
- q, k = qkv[:, :, 0], qkv[:, :, 1]
179
- else:
180
- assert qkv.dim() == 4
181
- assert num_heads_q is not None
182
- num_heads_k = (qkv.shape[2] - num_heads_q) // 2
183
- assert qkv.shape[2] == num_heads_q + 2 * num_heads_k
184
- q, k = qkv[:, :, :num_heads_q], qkv[:, :, num_heads_q : num_heads_q + num_heads_k]
185
- q = apply_rotary_fn(q, cos, sin)
186
- k = apply_rotary_fn(k, cos_k, sin_k)
187
- if not inplace:
188
- if qkv.dim() == 5:
189
- qkv = torch.stack([q, k, qkv[:, :, 2]], dim=2)
190
- else:
191
- qkv = torch.cat([q, k, qkv[:, :, num_heads_q + num_heads_k:]], dim=2)
192
- return qkv
193
-
194
-
195
- class ApplyRotaryEmbQKV_(torch.autograd.Function):
196
- @staticmethod
197
- def forward(
198
- ctx,
199
- qkv,
200
- cos,
201
- sin,
202
- cos_k=None,
203
- sin_k=None,
204
- interleaved=False,
205
- seqlen_offsets: Union[int, torch.Tensor] = 0,
206
- num_heads_q: Optional[int] = None,
207
- ):
208
- # apply_rotary_emb_qkv_inplace(
209
- qkv = _apply_rotary_emb_qkv(
210
- qkv, cos, sin, cos_k, sin_k, interleaved=interleaved, inplace=True,
211
- seqlen_offsets=seqlen_offsets, num_heads_q=num_heads_q,
212
- )
213
- if isinstance(seqlen_offsets, int):
214
- ctx.save_for_backward(cos, sin, cos_k, sin_k)
215
- ctx.seqlen_offsets = seqlen_offsets
216
- else:
217
- ctx.save_for_backward(cos, sin, cos_k, sin_k, seqlen_offsets)
218
- ctx.seqlen_offsets = None
219
- ctx.interleaved = interleaved
220
- ctx.num_heads_q = num_heads_q
221
- return qkv
222
-
223
- @staticmethod
224
- def backward(ctx, dqkv):
225
- seqlen_offsets = ctx.seqlen_offsets
226
- if seqlen_offsets is None:
227
- cos, sin, cos_k, sin_k, seqlen_offsets = ctx.saved_tensors
228
- else:
229
- cos, sin, cos_k, sin_k = ctx.saved_tensors
230
- dqkv = _apply_rotary_emb_qkv(
231
- dqkv, cos, sin, cos_k, sin_k, interleaved=ctx.interleaved, inplace=True,
232
- seqlen_offsets=seqlen_offsets, num_heads_q=ctx.num_heads_q, conjugate=True,
233
- )
234
- return dqkv, None, None, None, None, None, None, None
235
-
236
-
237
- def apply_rotary_emb_qkv_(
238
- qkv,
239
- cos,
240
- sin,
241
- cos_k=None,
242
- sin_k=None,
243
- interleaved=False,
244
- seqlen_offsets: Union[int, torch.Tensor] = 0,
245
- num_heads_q: Optional[int] = None,
246
- ):
247
- """
248
- Arguments:
249
- qkv: (batch_size, seqlen, 3, nheads, headdim) or (batch_size, seqlen, num_heads_q + 2 * num_heads_k, headdim).
250
- If qkv has shape (batch_size, seqlen, num_heads_q + 2 * num_heads_k, headdim) (e.g. MQA / GQA),
251
- then num_heads_q must be provided.
252
- cos, sin: (seqlen, rotary_dim / 2)
253
- cos_k, sin_k: (seqlen, rotary_dim / 2), optional
254
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead of
255
- 1st half and 2nd half (GPT-NeoX style).
256
- seqlen_offsets: (batch_size,) or int. Each sequence in Q and K is shifted by this amount.
257
- Most commonly used in inference when we have KV cache.
258
- Return:
259
- qkv: (batch_size, seqlen, 3, nheads, headdim) or (batch_size, seqlen, num_heads_q + 2 * num_heads_k, headdim)
260
- rotary_dim must be <= headdim
261
- Apply rotary embedding *inplace* to the first rotary_dim of Q and K.
262
- """
263
- return ApplyRotaryEmbQKV_.apply(
264
- qkv, cos, sin, cos_k, sin_k, interleaved, seqlen_offsets, num_heads_q
265
- )
266
-
267
-
268
- class ApplyRotaryEmbKV_(torch.autograd.Function):
269
-
270
- @staticmethod
271
- def forward(ctx, kv, cos, sin, interleaved=False, seqlen_offsets: Union[int, torch.Tensor] = 0):
272
- batch, seqlen, two, nheads, headdim = kv.shape
273
- assert two == 2
274
- k = kv[:, :, 0]
275
- apply_rotary(
276
- k, cos, sin, seqlen_offsets=seqlen_offsets, interleaved=interleaved, inplace=True
277
- )
278
- if isinstance(seqlen_offsets, int):
279
- ctx.save_for_backward(cos, sin) # Can't save int with save_for_backward
280
- ctx.seqlen_offsets = seqlen_offsets
281
- else:
282
- ctx.save_for_backward(cos, sin, seqlen_offsets)
283
- ctx.seqlen_offsets = None
284
- ctx.interleaved = interleaved
285
- return kv
286
-
287
- @staticmethod
288
- def backward(ctx, dkv):
289
- seqlen_offsets = ctx.seqlen_offsets
290
- if seqlen_offsets is None:
291
- cos, sin, seqlen_offsets = ctx.saved_tensors
292
- else:
293
- cos, sin = ctx.saved_tensors
294
- apply_rotary(
295
- dkv[:, :, 0],
296
- cos,
297
- sin,
298
- seqlen_offsets=seqlen_offsets,
299
- interleaved=ctx.interleaved,
300
- inplace=True,
301
- conjugate=True,
302
- )
303
- return dkv, None, None, None, None
304
-
305
-
306
- apply_rotary_emb_kv_ = ApplyRotaryEmbKV_.apply
307
-
308
-
309
- def apply_rotary_emb_kv_(
310
- kv,
311
- cos,
312
- sin,
313
- interleaved=False,
314
- seqlen_offsets: Union[int, torch.Tensor] = 0,
315
- ):
316
- """
317
- Arguments:
318
- kv: (batch_size, seqlen, 2, nheads, headdim)
319
- cos, sin: (seqlen, rotary_dim / 2)
320
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead of
321
- 1st half and 2nd half (GPT-NeoX style).
322
- seqlen_offsets: (batch_size,) or int. Each sequence in Q and K is shifted by this amount.
323
- Most commonly used in inference when we have KV cache.
324
- Return:
325
- kv: (batch_size, seqlen, 2, nheads, headdim)
326
- rotary_dim must be <= headdim
327
- Apply rotary embedding *inplace* to the first rotary_dim of K.
328
- """
329
- return ApplyRotaryEmbKV_.apply(kv, cos, sin, interleaved, seqlen_offsets)
330
-
331
-
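# A minimal sketch of apply_rotary_emb defined earlier in this file, assuming a CUDA
# device (the underlying Triton kernel is GPU-only); rotary_dim is 32 of headdim 64 and
# all sizes are made up for illustration.
import torch
from flash_attn.layers.rotary import apply_rotary_emb

batch, seqlen, nheads, headdim, rotary_dim = 2, 16, 4, 64, 32
x = torch.randn(batch, seqlen, nheads, headdim, device="cuda", dtype=torch.float16)
t = torch.arange(seqlen, device="cuda", dtype=torch.float32)
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, rotary_dim, 2, device="cuda").float() / rotary_dim))
freqs = torch.outer(t, inv_freq)               # (seqlen, rotary_dim / 2)
cos, sin = freqs.cos().half(), freqs.sin().half()
out = apply_rotary_emb(x, cos, sin)            # rotates only the first rotary_dim dims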
332
- class RotaryEmbedding(torch.nn.Module):
333
- """
334
- The rotary position embeddings from RoFormer_ (Su et. al).
335
- A crucial insight from the method is that the query and keys are
336
- transformed by rotation matrices which depend on the relative positions.
337
-
338
- Other implementations are available in the Rotary Transformer repo_ and in
339
- GPT-NeoX_, GPT-NeoX was an inspiration
340
-
341
- .. _RoFormer: https://arxiv.org/abs/2104.09864
342
- .. _repo: https://github.com/ZhuiyiTechnology/roformer
343
- .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
344
-
345
- If scale_base is not None, this implements XPos (Sun et al., https://arxiv.org/abs/2212.10554).
346
- A recommended value for scale_base is 512: https://github.com/HazyResearch/flash-attention/issues/96
347
- Reference: https://github.com/sunyt32/torchscale/blob/main/torchscale/component/xpos_relative_position.py
348
- """
349
-
350
- def __init__(
351
- self,
352
- dim: int,
353
- base=10000.0,
354
- interleaved=False,
355
- scale_base=None,
356
- device=None,
357
- ):
358
- """
359
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead
360
- of 1st half and 2nd half (GPT-NeoX style).
361
- """
362
- super().__init__()
363
- self.dim = dim
364
- self.base = float(base)
365
- # Generate and save the inverse frequency buffer (non trainable)
366
- inv_freq = self._compute_inv_freq(device)
367
- self.register_buffer("inv_freq", inv_freq, persistent=False)
368
- self.interleaved = interleaved
369
- self.scale_base = scale_base
370
- scale = (
371
- (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim)
372
- if scale_base is not None
373
- else None
374
- )
375
- self.register_buffer("scale", scale, persistent=False)
376
-
377
- self._seq_len_cached = 0
378
- self._cos_cached = None
379
- self._sin_cached = None
380
- self._cos_k_cached = None
381
- self._sin_k_cached = None
382
-
383
- def _compute_inv_freq(self, device=None):
384
- return 1.0 / (
385
- self.base
386
- ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim)
387
- )
388
-
389
- def _update_cos_sin_cache(self, seqlen, device=None, dtype=None):
390
- # Reset the tables if the sequence length has changed,
391
- # if we're on a new device (possibly due to tracing for instance),
392
- # or if we're switching from inference mode to training
393
- if (
394
- seqlen > self._seq_len_cached
395
- or self._cos_cached is None
396
- or self._cos_cached.device != device
397
- or self._cos_cached.dtype != dtype
398
- or (self.training and self._cos_cached.is_inference())
399
- ):
400
- self._seq_len_cached = seqlen
401
- # We want fp32 here, not self.inv_freq.dtype, since the model could be loaded in bf16
402
- # And the output of arange can be quite large, so bf16 would lose a lot of precision.
403
- t = torch.arange(seqlen, device=device, dtype=torch.float32)
404
- # We want fp32 here as well since inv_freq will be multiplied with t, and the output
405
- # will be large. Having it in bf16 will lose a lot of precision and cause the
406
- # cos & sin output to change significantly.
407
- # We want to recompute self.inv_freq if it was not loaded in fp32
408
- if self.inv_freq.dtype != torch.float32:
409
- inv_freq = self._compute_inv_freq(device=device)
410
- else:
411
- inv_freq = self.inv_freq
412
- # Don't do einsum, it converts fp32 to bf16 under AMP
413
- # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
414
- freqs = torch.outer(t, inv_freq)
415
- if self.scale is None:
416
- self._cos_cached = torch.cos(freqs).to(dtype)
417
- self._sin_cached = torch.sin(freqs).to(dtype)
418
- else:
419
- power = (
420
- torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device)
421
- - seqlen // 2
422
- ) / self.scale_base
423
- scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1")
424
- # We want the multiplication by scale to happen in fp32
425
- self._cos_cached = (torch.cos(freqs) * scale).to(dtype)
426
- self._sin_cached = (torch.sin(freqs) * scale).to(dtype)
427
- self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype)
428
- self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype)
429
-
430
- def forward(
431
- self,
432
- qkv: torch.Tensor,
433
- kv: Optional[torch.Tensor] = None,
434
- seqlen_offset: Union[int, torch.Tensor] = 0,
435
- max_seqlen: Optional[int] = None,
436
- num_heads_q: Optional[int] = None,
437
- ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
438
- """
439
- qkv: (batch, seqlen, 3, nheads, headdim) or (batch, seqlen, num_heads_q + 2 * num_heads_k, headdim)
440
- if kv is none, else it's just q of shape (batch, seqlen, nheads, headdim).
441
- If qkv has shape (batch, seqlen, num_heads_q + 2 * num_heads_k, headdim) (e.g. MQA / GQA),
442
- then num_heads_q must be provided.
443
- kv: (batch, seqlen, 2, nheads, headdim)
444
- seqlen_offset: (batch_size,) or int. Each sequence in x is shifted by this amount.
445
- Most commonly used in inference when we have KV cache.
446
- If it's a tensor of shape (batch_size,), then to update the cos / sin cache, one
447
- should pass in max_seqlen, which will update the cos / sin cache up to that length.
448
- Apply rotary embedding *inplace* to qkv and / or kv.
449
- """
450
- seqlen = qkv.shape[1]
451
- if max_seqlen is not None:
452
- self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype)
453
- elif isinstance(seqlen_offset, int):
454
- self._update_cos_sin_cache(seqlen + seqlen_offset, device=qkv.device, dtype=qkv.dtype)
455
- if kv is None:
456
- return apply_rotary_emb_qkv_(
457
- qkv,
458
- self._cos_cached,
459
- self._sin_cached,
460
- self._cos_k_cached if self.scale is not None else None,
461
- self._sin_k_cached if self.scale is not None else None,
462
- interleaved=self.interleaved,
463
- seqlen_offsets=seqlen_offset,
464
- num_heads_q=num_heads_q,
465
- )
466
- else:
467
- q = qkv
468
- q = apply_rotary_emb_func(
469
- q,
470
- self._cos_cached,
471
- self._sin_cached,
472
- interleaved=self.interleaved,
473
- inplace=True,
474
- seqlen_offsets=seqlen_offset,
475
- )
476
- kv = apply_rotary_emb_kv_(
477
- kv,
478
- self._cos_cached if self.scale is None else self._cos_k_cached,
479
- self._sin_cached if self.scale is None else self._sin_k_cached,
480
- interleaved=self.interleaved,
481
- seqlen_offsets=seqlen_offset,
482
- )
483
- return q, kv
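A minimal usage sketch for the RotaryEmbedding module above, applied in-place to a packed qkv tensor as during KV-cache decoding; it assumes a CUDA device and illustrative sizes.

import torch
from flash_attn.layers.rotary import RotaryEmbedding

batch, seqlen, nheads, headdim = 2, 16, 4, 64
rotary = RotaryEmbedding(dim=headdim, interleaved=False, device="cuda")
qkv = torch.randn(batch, seqlen, 3, nheads, headdim, device="cuda", dtype=torch.float16)
qkv = rotary(qkv, seqlen_offset=0)   # rotates q and k in-place; v is left untouched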
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/__init__.py DELETED
File without changes
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/activations.py DELETED
@@ -1,135 +0,0 @@
1
- # Copied from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/model/layers/activations.py
2
- import math
3
-
4
- import torch
5
- import torch.nn as nn
6
- import torch.nn.functional as F
7
-
8
- # 1/sqrt(2*pi)-> 0.3989423
9
- # 1/sqrt(2) -> 0.70710678
10
- # sqrt(2/pi) -> 0.79788456
11
-
12
- # this function is tanh approximation of gelu
13
- # actual gelu is:
14
- # x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
15
- @torch.jit.script
16
- def bias_gelu(y, bias):
17
- x = bias + y
18
- return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=y.dtype)
19
-
20
-
21
- # gradient of tanh approximation of gelu
22
- # gradient of actual gelu is:
23
- # 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
24
- @torch.jit.script
25
- def bias_gelu_back(g, y, bias):
26
- """Assume that y has shape (B, D) and bias has shape (D)"""
27
- x = bias + y
28
- tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
29
- # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
30
- ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
31
- 1 + tanh_out
32
- )
33
- grad_y = ff * g
34
- return grad_y.to(dtype=y.dtype), grad_y.sum(dim=(0), dtype=bias.dtype)
35
-
36
-
37
- class GeLUFunction(torch.autograd.Function):
38
- @staticmethod
39
- # bias is an optional argument
40
- def forward(ctx, input, bias):
41
- ctx.save_for_backward(input, bias)
42
- return bias_gelu(input, bias)
43
-
44
- @staticmethod
45
- def backward(ctx, grad_output):
46
- input, bias = ctx.saved_tensors
47
- tmp = bias_gelu_back(grad_output, input, bias)
48
- return tmp, tmp
49
-
50
-
51
- bias_gelu_impl = GeLUFunction.apply
52
-
53
- # this function is tanh approximation of gelu
54
- # actual gelu is:
55
- # x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
56
- @torch.jit.script
57
- def gelu_fwd(x):
58
- return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=x.dtype)
59
-
60
-
61
- # gradient of tanh approximation of gelu
62
- # gradient of actual gelu is:
63
- # 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
64
- @torch.jit.script
65
- def gelu_bwd(g, x):
66
- tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
67
- # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
68
- ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
69
- 1 + tanh_out
70
- )
71
- return (ff * g).to(dtype=x.dtype)
72
-
73
-
74
- class FastGeLUFunction(torch.autograd.Function):
75
- @staticmethod
76
- # bias is an optional argument
77
- def forward(ctx, input):
78
- ctx.save_for_backward(input)
79
- return gelu_fwd(input)
80
-
81
- @staticmethod
82
- def backward(ctx, grad_output):
83
- (input,) = ctx.saved_tensors
84
- tmp = gelu_bwd(grad_output, input)
85
- return tmp
86
-
87
-
88
- fast_gelu_impl = FastGeLUFunction.apply
89
-
90
-
91
- @torch.jit.script
92
- def relu_bwd(g, x):
93
- return torch.where(x >= 0, g, 0.0).to(dtype=x.dtype)
94
-
95
-
96
- @torch.jit.script
97
- def sqrelu_fwd(x):
98
- r = F.relu(x)
99
- return (r * r).to(dtype=x.dtype)
100
-
101
-
102
- @torch.jit.script
103
- def sqrelu_bwd(g, x):
104
- return (2.0 * g * F.relu(x)).to(dtype=x.dtype)
105
-
106
-
107
- swiglu_fwd_codestring = """
108
- template <typename T> T swiglu_fwd(T x, T y) {
109
- return float(x) * float(y) / (1.0f + ::exp(-float(x)));
110
- }
111
- """
112
- swiglu_bwd_codestring = """
113
- template <typename T> void swiglu_bwd(T x, T y, T g, T& dx, T& dy) {
114
- float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
115
- dx = x_sigmoid * (1 + float(x) * (1.0f - x_sigmoid)) * float(g) * float(y);
116
- dy = float(x) * x_sigmoid * float(g);
117
- }
118
- """
119
- swiglu_fwd = torch.cuda.jiterator._create_jit_fn(swiglu_fwd_codestring)
120
- swiglu_bwd = torch.cuda.jiterator._create_multi_output_jit_fn(swiglu_bwd_codestring, num_outputs=2)
121
-
122
-
123
- class SwiGLUFunction(torch.autograd.Function):
124
-
125
- @staticmethod
126
- def forward(ctx, x, y):
127
- ctx.save_for_backward(x, y)
128
- return swiglu_fwd(x, y)
129
-
130
- @staticmethod
131
- def backward(ctx, dout):
132
- x, y = ctx.saved_tensors
133
- return swiglu_bwd(x, y, dout)
134
-
135
- swiglu = SwiGLUFunction.apply
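A minimal sanity check for the activations above, under the assumption that a CUDA build of PyTorch is available (the swiglu jiterator kernels are CUDA-only): the tanh-approximate bias_gelu should match F.gelu(..., approximate="tanh"), and swiglu(x, y) should match silu(x) * y.

import torch
import torch.nn.functional as F
from flash_attn.ops.activations import bias_gelu, swiglu

y = torch.randn(8, 128, device="cuda")
b = torch.randn(128, device="cuda")
print(torch.allclose(bias_gelu(y, b), F.gelu(y + b, approximate="tanh"), atol=1e-5))

x2, y2 = torch.randn(8, 128, device="cuda"), torch.randn(8, 128, device="cuda")
print(torch.allclose(swiglu(x2, y2), F.silu(x2) * y2, atol=1e-5))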
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/fused_dense.py DELETED
@@ -1,688 +0,0 @@
1
- # Copyright (c) 2023, Tri Dao.
2
- # Inspired by https://github.com/NVIDIA/apex/blob/master/apex/fused_dense/fused_dense.py
3
- # We make it work with pytorch amp and with bfloat16.
4
- # The TensorParallel linear modules are inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/layers.py
5
- from functools import partial
6
- from typing import Optional
7
-
8
- # import fused_dense_cuda # from apex
9
- import fused_dense_lib as fused_dense_cuda
10
- import torch
11
- import torch.nn as nn
12
- import torch.nn.functional as F
13
- from torch import Tensor
14
- from torch.distributed import ProcessGroup
15
-
16
- from flash_attn.utils.torch import custom_fwd, custom_bwd
17
- from flash_attn.ops.activations import gelu_bwd, relu_bwd, sqrelu_bwd, sqrelu_fwd
18
- from flash_attn.utils.distributed import (
19
- all_gather_raw,
20
- all_reduce,
21
- all_reduce_raw,
22
- reduce_scatter,
23
- reduce_scatter_raw,
24
- )
25
-
26
-
27
- class FusedDenseFunc(torch.autograd.Function):
28
- @staticmethod
29
- @custom_fwd
30
- def forward(
31
- ctx, x, weight, bias, return_residual=False, process_group=None, sequence_parallel=True
32
- ):
33
- """
34
- If process_group is not None and sequence_parallel=True, we're doing Tensor Parallel
35
- with sequence parallelism: we do an all_gather_raw of x before doing the matmul.
36
- """
37
- ctx.compute_weight_gradient = weight.requires_grad
38
- ctx.return_residual = return_residual
39
- ctx.process_group = process_group
40
- ctx.sequence_parallel = sequence_parallel
41
-
42
- if torch.is_autocast_enabled():
43
- x = x.to(dtype=torch.get_autocast_gpu_dtype())
44
- x = x.contiguous()
45
- if process_group is not None and sequence_parallel:
46
- # We want to kick off the all_gather early, before weight dtype conversion
47
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
48
- else:
49
- total_x = x
50
-
51
- if torch.is_autocast_enabled():
52
- weight = weight.to(dtype=torch.get_autocast_gpu_dtype())
53
- bias = bias.to(dtype=torch.get_autocast_gpu_dtype()) if bias is not None else None
54
- weight = weight.contiguous()
55
- if process_group is not None and sequence_parallel:
56
- handle_x.wait()
57
- batch_shape, n = total_x.shape[:-1], total_x.shape[-1]
58
- batch_dim = batch_shape.numel()
59
- # https://github.com/pytorch/pytorch/blob/5b51849b48a7dbccd297286cc0110def4706f9e7/aten/src/ATen/native/cuda/Blas.cpp#L174
60
- if min(batch_dim, n, *weight.shape) > 65535 * 32:
61
- raise RuntimeError("fused_dense only supports matrix dims <= 2M")
62
- output = F.linear(total_x, weight, bias)
63
- if ctx.compute_weight_gradient:
64
- ctx.save_for_backward(x, weight)
65
- else:
66
- ctx.save_for_backward(weight)
67
- return output if not return_residual else (output, x)
68
-
69
- @staticmethod
70
- @custom_bwd
71
- def backward(ctx, grad_output, *args):
72
- grad_output = grad_output.contiguous()
73
- if ctx.return_residual:
74
- (grad_input,) = args
75
- grad_input = grad_input.contiguous()
76
- process_group = ctx.process_group
77
- sequence_parallel = ctx.sequence_parallel
78
- if ctx.compute_weight_gradient:
79
- x, weight = ctx.saved_tensors
80
- if process_group is not None and sequence_parallel:
81
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
82
- else:
83
- total_x = x
84
- else:
85
- (weight,) = ctx.saved_tensors
86
- total_x = None
87
- batch_shape = grad_output.shape[:-1]
88
- batch_dim = batch_shape.numel()
89
- grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
90
- if ctx.needs_input_grad[0]:
91
- if not ctx.return_residual:
92
- grad_input = F.linear(grad_output, weight.t())
93
- else:
94
- grad_input = torch.addmm(
95
- grad_input.reshape(batch_dim, grad_input.shape[-1]), grad_output, weight
96
- )
97
- grad_input = grad_input.reshape(*batch_shape, grad_input.shape[-1])
98
- if process_group is not None:
99
- reduce_fn = reduce_scatter_raw if sequence_parallel else all_reduce_raw
100
- grad_input, handle_grad_input = reduce_fn(grad_input, process_group, async_op=True)
101
- else:
102
- grad_input = None
103
- if ctx.needs_input_grad[1]:
104
- assert ctx.compute_weight_gradient
105
- if process_group is not None and sequence_parallel:
106
- handle_x.wait()
107
- grad_weight, grad_bias = fused_dense_cuda.linear_bias_wgrad(
108
- total_x.reshape(batch_dim, total_x.shape[-1]), grad_output, ctx.needs_input_grad[2]
109
- )
110
- else:
111
- grad_weight = None
112
- grad_bias = grad_output if ctx.needs_input_grad[2] else None
113
- if process_group is not None and ctx.needs_input_grad[0]:
114
- handle_grad_input.wait()
115
- return grad_input, grad_weight, grad_bias, None, None, None
116
-
117
-
118
- def fused_dense_func(
119
- x: Tensor,
120
- weight: Tensor,
121
- bias: Optional[Tensor] = None,
122
- return_residual: bool = False,
123
- process_group: Optional[ProcessGroup] = None,
124
- sequence_parallel: bool = True,
125
- ):
126
- dtype_eligible = x.dtype in [torch.float16, torch.bfloat16] or (
127
- x.dtype == torch.float32 and torch.is_autocast_enabled()
128
- )
129
- if x.is_cuda and weight.is_cuda and (bias is None or bias.is_cuda) and dtype_eligible:
130
- return FusedDenseFunc.apply(
131
- x, weight, bias, return_residual, process_group, sequence_parallel
132
- )
133
- else:
134
- assert process_group is None
135
- out = F.linear(x, weight, bias)
136
- return out if not return_residual else (out, x)
137
-
138
-
139
- class FusedDense(nn.Linear):
140
- def __init__(
141
- self,
142
- in_features: int,
143
- out_features: int,
144
- bias: bool = True,
145
- return_residual: bool = False,
146
- device=None,
147
- dtype=None,
148
- ) -> None:
149
- super().__init__(in_features, out_features, bias=bias, device=device, dtype=dtype)
150
- self.return_residual = return_residual
151
-
152
- def forward(self, x, process_group=None):
153
- """
154
- If process_group is not None, we're doing Tensor Parallel with sequence parallelism:
155
- we do an all_gather of x before doing the matmul.
156
- """
157
- return fused_dense_func(
158
- x,
159
- self.weight,
160
- self.bias,
161
- return_residual=self.return_residual,
162
- process_group=process_group,
163
- )
164
-
165
-
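# A minimal sketch of the FusedDense layer above: a drop-in nn.Linear whose backward
# fuses the weight/bias gradient computation, used here on fp16 CUDA tensors. Assumes
# the fused_dense_lib extension from this repo is installed; sizes are illustrative.
import torch
from flash_attn.ops.fused_dense import FusedDense

layer = FusedDense(1024, 4096, bias=True, device="cuda", dtype=torch.float16)
x = torch.randn(8, 512, 1024, device="cuda", dtype=torch.float16, requires_grad=True)
out = layer(x)            # (8, 512, 4096), same semantics as nn.Linear
out.sum().backward()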
166
- class ColumnParallelLinear(nn.Linear):
167
- def __init__(
168
- self,
169
- in_features: int,
170
- out_features: int,
171
- process_group: ProcessGroup,
172
- bias: bool = True,
173
- sequence_parallel=True,
174
- multiple_of=1,
175
- device=None,
176
- dtype=None,
177
- ) -> None:
178
- world_size = torch.distributed.get_world_size(process_group)
179
- if out_features % multiple_of:
180
- raise ValueError(f"out_features ({out_features}) must be a multiple of {multiple_of}")
181
- multiple = out_features // multiple_of
182
- # We want to split @multiple across world_size, but it could be an uneven split
183
- div = multiple // world_size
184
- mod = multiple % world_size
185
- # The first @mod ranks get @div + 1 copies, the rest get @div copies
186
- local_multiple = div + int(torch.distributed.get_rank(process_group) < mod)
187
- super().__init__(
188
- in_features, local_multiple * multiple_of, bias=bias, device=device, dtype=dtype
189
- )
190
- self.process_group = process_group
191
- self.sequence_parallel = sequence_parallel
192
-
193
- def forward(self, x):
194
- # If self.sequence_parallel is True, we're doing Tensor Parallel with sequence parallelism:
195
- # we do an all_gather of x before doing the matmul.
196
- # If not, then the input is already gathered.
197
- return fused_dense_func(
198
- x,
199
- self.weight,
200
- self.bias,
201
- process_group=self.process_group,
202
- sequence_parallel=self.sequence_parallel,
203
- )
204
-
205
-
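# A worked illustration of the uneven-split arithmetic used by ColumnParallelLinear
# above (and by RowParallelLinear below): world_size=4 ranks sharing multiple=53 chunks,
# so the first 53 % 4 = 1 rank gets 53 // 4 + 1 = 14 chunks and the rest get 13 each.
def local_chunks(multiple: int, world_size: int, rank: int) -> int:
    div, mod = divmod(multiple, world_size)
    return div + int(rank < mod)   # the first `mod` ranks get one extra chunk

assert [local_chunks(53, 4, r) for r in range(4)] == [14, 13, 13, 13]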
206
- class RowParallelLinear(nn.Linear):
207
- def __init__(
208
- self,
209
- in_features: int,
210
- out_features: int,
211
- process_group: ProcessGroup,
212
- bias: bool = True,
213
- sequence_parallel=True,
214
- multiple_of=1,
215
- device=None,
216
- dtype=None,
217
- ) -> None:
218
- world_size = torch.distributed.get_world_size(process_group)
219
- rank = torch.distributed.get_rank(process_group)
220
- if in_features % multiple_of:
221
- raise ValueError(f"in_features ({in_features}) must be a multiple of {multiple_of}")
222
- multiple = in_features // multiple_of
223
- # We want to split @multiple across world_size, but it could be an uneven split
224
- div = multiple // world_size
225
- mod = multiple % world_size
226
- # The first @mod ranks get @div + 1 copies, the rest get @div copies
227
- local_multiple = div + int(torch.distributed.get_rank(process_group) < mod)
228
- # Only rank 0 will have bias
229
- super().__init__(
230
- local_multiple * multiple_of,
231
- out_features,
232
- bias=bias and rank == 0,
233
- device=device,
234
- dtype=dtype,
235
- )
236
- self.process_group = process_group
237
- self.sequence_parallel = sequence_parallel
238
-
239
- def forward(self, x):
240
- """
241
- We're doing Tensor Parallel with sequence parallelism: we do the matmul and then
242
- a reduce_scatter of the result.
243
- """
244
- out = fused_dense_func(x, self.weight, self.bias)
245
- reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce
246
- return reduce_fn(out, self.process_group)
247
-
248
-
249
- class FusedMLPFunc(torch.autograd.Function):
250
- @staticmethod
251
- @custom_fwd
252
- def forward(
253
- ctx,
254
- x,
255
- weight1,
256
- bias1,
257
- weight2,
258
- bias2,
259
- activation="gelu_approx",
260
- save_pre_act=True,
261
- return_residual=False,
262
- checkpoint_lvl=0,
263
- heuristic=0,
264
- process_group=None,
265
- sequence_parallel=True,
266
- ):
267
- """
268
- If process_group is not None and sequence_parallel=True, we're doing Tensor Parallel
269
- with sequence parallelism: we do an all_gather of x before doing the matmul.
270
- If sequence_parallel=False, then the input is already gathered.
271
-
272
- checkpoint_lvl:
273
- 0: no recomputation in the bwd
274
- 1: recompute gelu_out / relu_out in the bwd
275
- 2: recompute pre_act and gelu_out / relu_out in the bwd
276
- """
277
- assert -1 <= heuristic <= 4
278
- assert activation in ["gelu_approx", "relu", "sqrelu"]
279
- if activation == "sqrelu":
280
- assert heuristic == -1
281
- if not save_pre_act:
282
- checkpoint_lvl = 2
283
- assert checkpoint_lvl in [0, 1, 2]
284
- ctx.return_residual = return_residual
285
- ctx.process_group = process_group
286
- ctx.sequence_parallel = sequence_parallel
287
- ctx.checkpoint_lvl = checkpoint_lvl
288
- ctx.activation = activation
289
- ctx.heuristic = heuristic
290
-
291
- if torch.is_autocast_enabled():
292
- x = x.to(dtype=torch.get_autocast_gpu_dtype())
293
- x = x.contiguous()
294
- if process_group is not None and sequence_parallel:
295
- # We want to kick off the all_gather early, before weight dtype conversion
296
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
297
- else:
298
- total_x = x
299
-
300
- if torch.is_autocast_enabled():
301
- dtype = torch.get_autocast_gpu_dtype()
302
- weight1, weight2 = [a.to(dtype=dtype) for a in [weight1, weight2]]
303
- bias1 = bias1.to(dtype=dtype) if bias1 is not None else None
304
- bias2 = bias2.to(dtype=dtype) if bias2 is not None else None
305
- weight1 = weight1.contiguous()
306
- bias1 = bias1.contiguous() if bias1 is not None else None
307
- weight2 = weight2.contiguous()
308
- bias2 = bias2.contiguous() if bias2 is not None else None
309
- if process_group is not None and sequence_parallel:
310
- handle_x.wait()
311
- batch_shape, n = total_x.shape[:-1], total_x.shape[-1]
312
- batch_dim = batch_shape.numel()
313
- # https://github.com/pytorch/pytorch/blob/5b51849b48a7dbccd297286cc0110def4706f9e7/aten/src/ATen/native/cuda/Blas.cpp#L174
314
- if min(batch_dim, n, *weight1.shape, *weight2.shape) > 65535 * 32:
315
- raise RuntimeError("fused_dense only supports matrix dims <= 2M")
316
- if heuristic == -1:
317
- pre_act = F.linear(total_x, weight1, bias1)
318
- activation_fn = (
319
- partial(F.gelu, approximate="tanh")
320
- if activation == "gelu_approx"
321
- else (sqrelu_fwd if activation == "sqrelu" else F.relu)
322
- )
323
- with torch.jit.fuser("fuser2"):
324
- output1 = activation_fn(pre_act)
325
- # This is before adding bias1
326
- # pre_act = F.linear(total_x.reshape(batch_dim, n), weight1)
327
- # with torch.jit.fuser('fuser2'):
328
- # output1 = bias_gelu(pre_act, bias1)
329
- else:
330
- is_gelu = activation == "gelu_approx"
331
- output1, *rest = fused_dense_cuda.linear_act_forward(
332
- total_x.reshape(batch_dim, n), weight1, bias1, is_gelu, save_pre_act, heuristic
333
- )
334
- if save_pre_act:
335
- pre_act = rest[0]
336
- output2 = F.linear(output1, weight2, bias2)
337
- if checkpoint_lvl == 0 or (checkpoint_lvl == 1 and activation == "relu"):
338
- # For RELU the pre_act is very small (just a bit-mask) so we just save it
339
- ctx.save_for_backward(x, weight1, weight2, pre_act, output1)
340
- elif checkpoint_lvl == 1:
341
- ctx.save_for_backward(x, weight1, weight2, pre_act)
342
- elif checkpoint_lvl == 2:
343
- ctx.save_for_backward(x, weight1, weight2, bias1)
344
- output2 = output2.reshape(*batch_shape, output2.shape[-1])
345
- return output2 if not return_residual else (output2, x)
346
-
347
- @staticmethod
348
- @custom_bwd
349
- def backward(ctx, grad_output, *args):
350
- grad_output = grad_output.contiguous()
351
- checkpoint_lvl = ctx.checkpoint_lvl
352
- activation = ctx.activation
353
- activation_fn = (
354
- partial(F.gelu, approximate="tanh")
355
- if activation == "gelu_approx"
356
- else (sqrelu_fwd if activation == "sqrelu" else F.relu)
357
- )
358
- if ctx.return_residual:
359
- (grad_input,) = args
360
- grad_input = grad_input.contiguous()
361
- process_group = ctx.process_group
362
- sequence_parallel = ctx.sequence_parallel
363
- x, weight1, weight2, *rest = ctx.saved_tensors
364
- if process_group is None or not sequence_parallel:
365
- total_x = x
366
- batch_shape = grad_output.shape[:-1]
367
- batch_dim = batch_shape.numel()
368
- if checkpoint_lvl in [0, 1]:
369
- if process_group is not None and sequence_parallel:
370
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
371
- if checkpoint_lvl == 0 or (checkpoint_lvl == 1 and activation == "relu"):
372
- pre_act, output1 = rest
373
- elif checkpoint_lvl == 1:
374
- (pre_act,) = rest
375
- with torch.jit.fuser("fuser2"):
376
- output1 = activation_fn(pre_act)
377
- elif checkpoint_lvl == 2:
378
- (bias1,) = rest
379
- if process_group is not None and sequence_parallel:
380
- total_x, _ = all_gather_raw(x, process_group)
381
- if ctx.heuristic == -1:
382
- pre_act = F.linear(total_x, weight1, bias1)
383
- with torch.jit.fuser("fuser2"):
384
- output1 = activation_fn(pre_act)
385
- else:
386
- output1, pre_act = fused_dense_cuda.linear_act_forward(
387
- total_x.reshape(batch_dim, total_x.shape[-1]),
388
- weight1,
389
- bias1,
390
- activation == "gelu_approx",
391
- True,
392
- ctx.heuristic,
393
- )
394
-
395
- grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
396
- output1 = output1.reshape(batch_dim, output1.shape[-1])
397
- pre_act = pre_act.reshape(batch_dim, pre_act.shape[-1])
398
- if ctx.needs_input_grad[3]:
399
- grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(
400
- output1, grad_output, ctx.needs_input_grad[4]
401
- )
402
- else:
403
- grad_weight2 = None
404
- grad_bias2 = grad_output if ctx.needs_input_grad[4] else None
405
- if ctx.heuristic == -1:
406
- # grad_pre_act = matmul_dgelu(grad_output, weight2, pre_act)
407
- grad_output1 = F.linear(grad_output, weight2.t())
408
- activation_grad_fn = (
409
- gelu_bwd
410
- if activation == "gelu_approx"
411
- else (sqrelu_bwd if activation == "sqrelu" else relu_bwd)
412
- )
413
- with torch.jit.fuser("fuser2"):
414
- grad_pre_act = activation_grad_fn(grad_output1, pre_act)
415
- else:
416
- # The cublasLt epilogue has to compute both gelu/relu grad and bias grad, we can't
417
- # just compute gelu/relu grad
418
- grad_pre_act, grad_bias1 = fused_dense_cuda.bias_act_linear_dgrad_bgrad(
419
- weight2, grad_output, pre_act, activation == "gelu_approx", ctx.heuristic
420
- )
421
- if not ctx.needs_input_grad[2]:
422
- grad_bias1 = None
423
- if ctx.needs_input_grad[0]:
424
- if not ctx.return_residual:
425
- grad_input = F.linear(grad_pre_act, weight1.t())
426
- else:
427
- grad_input = torch.addmm(
428
- grad_input.reshape(batch_dim, grad_input.shape[-1]), grad_pre_act, weight1
429
- )
430
- grad_input = grad_input.reshape(*batch_shape, grad_input.shape[-1])
431
- if process_group is not None:
432
- reduce_fn = reduce_scatter_raw if sequence_parallel else all_reduce_raw
433
- grad_input, handle_grad_input = reduce_fn(grad_input, process_group, async_op=True)
434
- else:
435
- grad_input = None
436
- if ctx.heuristic == -1:
437
- if ctx.needs_input_grad[1]:
438
- if process_group is not None and sequence_parallel and checkpoint_lvl != 2:
439
- handle_x.wait()
440
- grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_wgrad(
441
- total_x.reshape(batch_dim, total_x.shape[-1]),
442
- grad_pre_act,
443
- ctx.needs_input_grad[2],
444
- )
445
- else:
446
- grad_weight1 = None
447
- grad_bias1 = grad_pre_act if ctx.needs_input_grad[2] else None
448
- else:
449
- if ctx.needs_input_grad[1]:
450
- if process_group is not None and sequence_parallel and checkpoint_lvl != 2:
451
- handle_x.wait()
452
- grad_weight1 = F.linear(
453
- grad_pre_act.t(), total_x.reshape(batch_dim, total_x.shape[-1]).t()
454
- )
455
- else:
456
- grad_weight1 = None
457
- if process_group is not None and ctx.needs_input_grad[0]:
458
- handle_grad_input.wait()
459
- return (
460
- grad_input,
461
- grad_weight1,
462
- grad_bias1,
463
- grad_weight2,
464
- grad_bias2,
465
- None,
466
- None,
467
- None,
468
- None,
469
- None,
470
- None,
471
- None,
472
- )
473
-
474
-
475
- def fused_mlp_func(
476
- x: Tensor,
477
- weight1: Tensor,
478
- weight2: Tensor,
479
- bias1: Optional[Tensor] = None,
480
- bias2: Optional[Tensor] = None,
481
- activation: str = "gelu_approx",
482
- save_pre_act: bool = True,
483
- return_residual: bool = False,
484
- checkpoint_lvl: int = 0,
485
- heuristic: int = 0,
486
- process_group: Optional[ProcessGroup] = None,
487
- sequence_parallel: bool = True,
488
- ):
489
- assert activation in ["gelu_approx", "relu", "sqrelu"]
490
- dtype_eligible = x.dtype in [torch.float16, torch.bfloat16] or (
491
- x.dtype == torch.float32 and torch.is_autocast_enabled()
492
- )
493
- # If we save pre-activation, dimension must be divisible by 128 (relu) or 8 (gelu)
494
- dim_eligible = not save_pre_act or (x.shape[-1] % (128 if activation == "relu" else 8) == 0)
495
- if (
496
- x.is_cuda
497
- and weight1.is_cuda
498
- and weight2.is_cuda
499
- and (bias1 is None or bias1.is_cuda)
500
- and (bias2 is None or bias2.is_cuda)
501
- and dtype_eligible
502
- and dim_eligible
503
- ):
504
- return FusedMLPFunc.apply(
505
- x,
506
- weight1,
507
- bias1,
508
- weight2,
509
- bias2,
510
- activation,
511
- save_pre_act,
512
- return_residual,
513
- checkpoint_lvl,
514
- heuristic,
515
- process_group,
516
- sequence_parallel,
517
- )
518
- else:
519
- assert process_group is None
520
- pre_act = F.linear(x, weight1, bias1)
521
- activation_fn = (
522
- partial(F.gelu, approximate="tanh")
523
- if activation == "gelu_approx"
524
- else partial(F.relu, inplace=True)
525
- )
526
- output1 = activation_fn(pre_act)
527
- output2 = F.linear(output1, weight2, bias2)
528
- return output2 if not return_residual else (output2, x)
529
-
530
-
531
- class FusedMLP(nn.Module):
532
- def __init__(
533
- self,
534
- in_features,
535
- hidden_features=None,
536
- out_features=None,
537
- bias1=True,
538
- bias2=True,
539
- activation="gelu_approx",
540
- return_residual=False,
541
- checkpoint_lvl=0,
542
- heuristic="auto",
543
- device=None,
544
- dtype=None,
545
- ):
546
- """
547
- If process_group is not None, we're doing Tensor Parallel with sequence parallelism:
548
- we do an all_gather of x before doing the matmul, gelu, then matmul.
549
- Finally we do a reduce_scatter of the output.
550
-
551
- checkpoint_lvl (increasing lvl means slower but more memory saving):
552
- 0: no recomputation in the bwd
553
- 1: recompute gelu_out in the bwd
554
- 2: recompute pre_act and gelu_out in the bwd
555
- heuristic:
556
- -1: don't fuse gemm + gelu (separate kernel)
557
- 0..4: use this heuristic for the algo section in the fused gemm + gelu
558
- 'auto': heuristic will be picked automatically:
559
- For CUDA >= 11.8, we set heuristic=0 for both fp16 and bf16 for best perf.
560
- For CUDA <= 11.7, we set heuristic=1 for fp16 and heuristic=-1 for bf16.
561
- For H100, we set heuristic=-1 for both fp16 and bf16 as the fused cuBlasLt implementation
562
- is slower than the unfused version.
563
- return_residual: whether to return the input x along with the output. This is for
564
- performance reason: for post-norm architecture, returning the input allows us
565
- to fuse the backward of nn.Linear with the residual connection.
566
- """
567
- assert checkpoint_lvl in [0, 1, 2]
568
- assert activation in ["gelu_approx", "relu", "sqrelu"]
569
- factory_kwargs = {"device": device, "dtype": dtype}
570
- super().__init__()
571
- out_features = out_features or in_features
572
- hidden_features = hidden_features or in_features * 4
573
- self.activation = activation
574
- self.return_residual = return_residual
575
- self.checkpoint_lvl = checkpoint_lvl
576
- self.heuristic = heuristic if activation != "sqrelu" else -1
577
- self.fc1 = nn.Linear(in_features, hidden_features, bias=bias1, **factory_kwargs)
578
- self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
579
-
580
- def forward(self, x, process_group=None):
581
- dtype = x.dtype if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype()
582
- if self.heuristic == "auto":
583
- if self.activation == "gelu_approx":
584
- if torch.cuda.get_device_capability("cuda") == (9, 0):
585
- heuristic = -1
586
- else:
587
- cuda_ver = tuple(map(int, torch.version.cuda.split(".")))
588
- heuristic = 0 if cuda_ver >= (11, 8) else (1 if dtype == torch.float16 else -1)
589
- else:
590
- heuristic = 0
591
- else:
592
- heuristic = self.heuristic
593
- out = fused_mlp_func(
594
- x,
595
- self.fc1.weight,
596
- self.fc2.weight,
597
- self.fc1.bias,
598
- self.fc2.bias,
599
- activation=self.activation,
600
- save_pre_act=self.training,
601
- return_residual=self.return_residual,
602
- checkpoint_lvl=self.checkpoint_lvl,
603
- heuristic=heuristic,
604
- process_group=process_group,
605
- )
606
- if self.return_residual:
607
- out, x = out
608
- if process_group is not None:
609
- out = reduce_scatter(out, process_group)
610
- return out if not self.return_residual else (out, x)
611
-
612
-
613
- class ParallelFusedMLP(nn.Module):
614
- def __init__(
615
- self,
616
- in_features,
617
- hidden_features=None,
618
- out_features=None,
619
- activation="gelu_approx",
620
- process_group: ProcessGroup = None,
621
- bias1=True,
622
- bias2=True,
623
- sequence_parallel=True,
624
- checkpoint_lvl=0,
625
- heuristic="auto",
626
- device=None,
627
- dtype=None,
628
- ):
629
- """
630
- process_group is required. We're doing Tensor Parallel with sequence parallelism:
631
- we do an all_gather of x before doing the matmul, gelu, then matmul.
632
- Finally we do a reduce_scatter of the output.
633
-
634
- checkpoint_lvl (increasing lvl means slower but more memory saving):
635
- 0: no recomputation in the bwd
636
- 1: recompute gelu_out in the bwd
637
- 2: recompute pre_act and gelu_out in the bwd
638
- heuristic:
639
- -1: don't fuse gemm + gelu (separate kernel)
640
- 0..4: use this heuristic for the algo section in the fused gemm + gelu
641
- 'auto': heuristic will be picked automatically:
642
- For CUDA >= 11.8, we set heuristic=0 for both fp16 and bf16 for best perf.
643
- For CUDA <= 11.7, we set heuristic=1 for fp16 and heuristic=-1 for bf16.
644
- """
645
- assert checkpoint_lvl in [0, 1, 2]
646
- assert activation in ["gelu_approx", "relu", "sqrelu"]
647
- assert process_group is not None
648
- factory_kwargs = {"device": device, "dtype": dtype}
649
- super().__init__()
650
- out_features = out_features or in_features
651
- hidden_features = hidden_features or in_features * 4
652
- self.activation = activation
653
- self.process_group = process_group
654
- self.sequence_parallel = sequence_parallel
655
- self.checkpoint_lvl = checkpoint_lvl
656
- self.heuristic = heuristic if activation != "sqrelu" else -1
657
- self.fc1 = ColumnParallelLinear(
658
- in_features, hidden_features, process_group, bias=bias1, **factory_kwargs
659
- )
660
- self.fc2 = RowParallelLinear(
661
- hidden_features, out_features, process_group, bias=bias2, **factory_kwargs
662
- )
663
-
664
- def forward(self, x):
665
- dtype = x.dtype if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype()
666
- if self.heuristic == "auto":
667
- if self.activation == "gelu_approx":
668
- cuda_ver = tuple(map(int, torch.version.cuda.split(".")))
669
- heuristic = 0 if cuda_ver >= (11, 8) else (1 if dtype == torch.float16 else -1)
670
- else:
671
- heuristic = 0
672
- else:
673
- heuristic = self.heuristic
674
- out = fused_mlp_func(
675
- x,
676
- self.fc1.weight,
677
- self.fc2.weight,
678
- self.fc1.bias,
679
- self.fc2.bias,
680
- activation=self.activation,
681
- save_pre_act=self.training,
682
- checkpoint_lvl=self.checkpoint_lvl,
683
- heuristic=heuristic,
684
- process_group=self.process_group,
685
- sequence_parallel=self.sequence_parallel,
686
- )
687
- reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce
688
- return reduce_fn(out, self.process_group)
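
For reference, a minimal usage sketch of the fused MLP module whose deleted source appears above. This is illustrative only: it assumes a CUDA device, fp16/bf16 tensors, a build that still ships the `fused_dense_cuda` extension, and that the module is importable as `flash_attn.ops.fused_dense` (the import path is inferred, not shown in this hunk).

```python
# Illustrative sketch only; assumes a build that still ships FusedMLP and
# that the module path is flash_attn.ops.fused_dense (inferred, not shown here).
import torch
from flash_attn.ops.fused_dense import FusedMLP  # assumed import path

mlp = FusedMLP(
    in_features=1024,
    hidden_features=4096,      # defaults to 4 * in_features if omitted
    activation="gelu_approx",  # one of "gelu_approx", "relu", "sqrelu"
    checkpoint_lvl=0,          # 0: no recompute in bwd, 1/2: trade speed for memory
    heuristic="auto",          # -1 disables the fused GEMM + activation epilogue
    device="cuda",
    dtype=torch.float16,
)

x = torch.randn(8, 512, 1024, device="cuda", dtype=torch.float16)
y = mlp(x)  # same leading shape as x, last dim = out_features
```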
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/layer_norm.py DELETED
@@ -1,800 +0,0 @@
1
- # Copyright (c) 2022, Tri Dao.
2
- # Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py
3
-
4
- import dropout_layer_norm
5
- import torch
6
- from torch.nn import init
7
-
8
-
9
- def maybe_align(x, alignment_in_bytes=16):
10
- """Assume that x already has last dim divisible by alignment_in_bytes"""
11
- # TD [2023-07-04] I'm not 100% sure that clone will align the memory
12
- # https://discuss.pytorch.org/t/how-to-ensure-that-tensor-data-ptr-is-aligned-to-16-bytes/183440
13
- return x if x.data_ptr() % alignment_in_bytes == 0 else x.clone()
14
-
15
-
16
- def _dropout_add_layer_norm_forward(
17
- x0,
18
- residual,
19
- gamma,
20
- beta,
21
- rowscale,
22
- colscale,
23
- dropout_p,
24
- epsilon,
25
- residual_in_fp32=False,
26
- is_rms_norm=False,
27
- ):
28
- """Assume that arguments are contiguous and aligned to 16 bytes"""
29
- hidden_size = gamma.numel()
30
- x0mat = x0.view((-1, hidden_size))
31
- residualmat = residual.view((-1, hidden_size)) if residual is not None else None
32
- rowscale = rowscale.view(-1) if rowscale is not None else None
33
- zmat, xmat, dmask, mu, rsigma = dropout_layer_norm.dropout_add_ln_fwd(
34
- x0mat,
35
- residualmat,
36
- gamma,
37
- beta,
38
- rowscale,
39
- colscale,
40
- None,
41
- None,
42
- dropout_p,
43
- epsilon,
44
- 1.0,
45
- 0,
46
- None,
47
- residual_in_fp32,
48
- is_rms_norm,
49
- )
50
- # dmask is None if dropout_p == 0.0
51
- # xmat is None if dropout_p == 0.0 and residual is None and residual_dtype != input_dtype
52
- return zmat, xmat if xmat is not None else x0mat, dmask, mu, rsigma
53
-
54
-
55
- def _dropout_add_layer_norm_backward(
56
- dz,
57
- dx,
58
- x,
59
- x0,
60
- dmask,
61
- mu,
62
- rsigma,
63
- gamma,
64
- rowscale,
65
- colscale,
66
- dropout_p,
67
- has_residual,
68
- is_rms_norm=False,
69
- ):
70
- """Assume that arguments are contiguous and aligned to 16 bytes
71
- dx == None means that it was a post-norm architecture
72
- (x = drop(x0) + residual was not returned in the fwd).
73
- x0 must not be None if we have colscale.
74
- """
75
- hidden_size = gamma.numel()
76
- xmat = x.view((-1, hidden_size))
77
- dzmat = dz.view(xmat.shape)
78
- dxmat = dx.view(xmat.shape) if dx is not None else None
79
- x0mat = x0.view((-1, hidden_size)) if x0 is not None else None
80
- rowscale = rowscale.view(-1) if rowscale is not None else None
81
- if colscale is not None:
82
- assert x0 is not None, "x0 is required to compute the gradient of colscale"
83
- dx0mat, dresidualmat, dgamma, dbeta, _, _, *rest = dropout_layer_norm.dropout_add_ln_bwd(
84
- dzmat,
85
- dxmat,
86
- xmat,
87
- x0mat,
88
- dmask,
89
- mu,
90
- rsigma,
91
- gamma,
92
- rowscale,
93
- colscale,
94
- None,
95
- None,
96
- dropout_p,
97
- 1.0,
98
- 0,
99
- has_residual,
100
- is_rms_norm,
101
- )
102
- # dresidualmat is None if not has_residual
103
- if colscale is None:
104
- return dx0mat, dresidualmat, dgamma, dbeta
105
- else:
106
- dcolscale = rest[0]
107
- return dx0mat, dresidualmat, dgamma, dbeta, dcolscale
108
-
109
-
110
- def _dropout_add_layer_norm_subset_forward(
111
- x0,
112
- residual,
113
- gamma,
114
- beta,
115
- colscale,
116
- x0_subset,
117
- out_subset,
118
- dropout_p,
119
- epsilon,
120
- rowscale_const,
121
- out_numrows,
122
- residual_in_fp32=False,
123
- is_rms_norm=False,
124
- ):
125
- """Assume that arguments are contiguous and aligned to 16 bytes"""
126
- hidden_size = gamma.numel()
127
- x0mat = x0.view((-1, hidden_size))
128
- residualmat = residual.view((-1, hidden_size)) if residual is not None else None
129
- x0_subset = x0_subset.view(-1) if x0_subset is not None else None
130
- out_subset = out_subset.view(-1) if out_subset is not None else None
131
- zmat, xmat, dmask, mu, rsigma = dropout_layer_norm.dropout_add_ln_fwd(
132
- x0mat,
133
- residualmat,
134
- gamma,
135
- beta,
136
- None,
137
- colscale,
138
- x0_subset,
139
- out_subset,
140
- dropout_p,
141
- epsilon,
142
- rowscale_const,
143
- out_numrows,
144
- None,
145
- residual_in_fp32,
146
- is_rms_norm,
147
- )
148
- # dmask is None if dropout_p == 0.0
149
- # xmat is None if dropout_p == 0.0 and residual is None and residual_dtype != input_dtype
150
- return zmat, xmat if xmat is not None else x0mat, dmask, mu, rsigma
151
-
152
-
153
- def _dropout_add_layer_norm_subset_backward(
154
- dz,
155
- dx,
156
- x,
157
- x0,
158
- dmask,
159
- mu,
160
- rsigma,
161
- gamma,
162
- colscale,
163
- x0_subset,
164
- out_subset,
165
- dropout_p,
166
- rowscale_const,
167
- x0_numrows,
168
- has_residual,
169
- is_rms_norm=False,
170
- ):
171
- """Assume that arguments are contiguous and aligned to 16 bytes
172
- dx == None means that it was a post-norm architecture
173
- (x = drop(x0) + residual was not returned in the fwd).
174
- x0 must not be None if we have colscale.
175
- """
176
- hidden_size = gamma.numel()
177
- xmat = x.view((-1, hidden_size))
178
- dzmat = dz.view(-1, hidden_size)
179
- dxmat = dx.view(xmat.shape) if dx is not None else None
180
- x0mat = x0.view((-1, hidden_size)) if x0 is not None else None
181
- x0_subset = x0_subset.view(-1) if x0_subset is not None else None
182
- out_subset = out_subset.view(-1) if out_subset is not None else None
183
- if colscale is not None:
184
- assert x0 is not None, "x0 is required to compute the gradient of colscale"
185
- dx0mat, dresidualmat, dgamma, dbeta, _, _, *rest = dropout_layer_norm.dropout_add_ln_bwd(
186
- dzmat,
187
- dxmat,
188
- xmat,
189
- x0mat,
190
- dmask,
191
- mu,
192
- rsigma,
193
- gamma,
194
- None,
195
- colscale,
196
- x0_subset,
197
- out_subset,
198
- dropout_p,
199
- rowscale_const,
200
- x0_numrows,
201
- has_residual,
202
- is_rms_norm,
203
- )
204
- # dresidualmat is None if not has_residual
205
- if colscale is None:
206
- return dx0mat, dresidualmat, dgamma, dbeta
207
- else:
208
- dcolscale = rest[0]
209
- return dx0mat, dresidualmat, dgamma, dbeta, dcolscale
210
-
211
-
212
- def _dropout_add_layer_norm_parallel_residual_forward(
213
- x0,
214
- x1,
215
- residual,
216
- gamma0,
217
- beta0,
218
- gamma1,
219
- beta1,
220
- dropout_p,
221
- epsilon,
222
- residual_in_fp32=False,
223
- is_rms_norm=False,
224
- ):
225
- """Assume that arguments are contiguous and aligned to 16 bytes"""
226
- hidden_size = gamma0.numel()
227
- x0mat = x0.view((-1, hidden_size))
228
- x1mat = x1.view((-1, hidden_size)) if x1 is not None else None
229
- residualmat = residual.view((-1, hidden_size)) if residual is not None else None
230
- (
231
- z0mat,
232
- z1mat,
233
- xmat,
234
- dmask0,
235
- dmask1,
236
- mu,
237
- rsigma,
238
- ) = dropout_layer_norm.dropout_add_ln_parallel_residual_fwd(
239
- x0mat,
240
- x1mat,
241
- residualmat,
242
- gamma0,
243
- beta0,
244
- gamma1,
245
- beta1,
246
- dropout_p,
247
- epsilon,
248
- None,
249
- residual_in_fp32,
250
- is_rms_norm,
251
- )
252
- # dmask0 and dmask1 are None if dropout_p == 0.0
253
- # xmat is None if dropout_p == 0.0 and residual is None and residual_dtype != input_dtype
254
- return z0mat, z1mat, xmat if xmat is not None else x0mat, dmask0, dmask1, mu, rsigma
255
-
256
-
257
- def _dropout_add_layer_norm_parallel_residual_backward(
258
- dz0,
259
- dz1,
260
- dx,
261
- x,
262
- dmask0,
263
- dmask1,
264
- mu,
265
- rsigma,
266
- gamma0,
267
- gamma1,
268
- dropout_p,
269
- has_x1,
270
- has_residual,
271
- is_rms_norm=False,
272
- ):
273
- """Assume that arguments are contiguous and aligned to 16 bytes
274
- dx == None means that it was a post-norm architecture
275
- (x = drop(x0) + residual was not returned in the fwd).
276
- """
277
- hidden_size = gamma0.numel()
278
- xmat = x.view((-1, hidden_size))
279
- dz0mat = dz0.view(xmat.shape)
280
- dz1mat = dz1.view(xmat.shape) if dz1 is not None else None
281
- dxmat = dx.view(xmat.shape) if dx is not None else None
282
- (
283
- dx0mat,
284
- dx1mat,
285
- dresidualmat,
286
- dgamma0,
287
- dbeta0,
288
- dgamma1,
289
- dbeta1,
290
- *rest,
291
- ) = dropout_layer_norm.dropout_add_ln_parallel_residual_bwd(
292
- dz0mat,
293
- dz1mat,
294
- dxmat,
295
- xmat,
296
- dmask0,
297
- dmask1,
298
- mu,
299
- rsigma,
300
- gamma0,
301
- gamma1,
302
- dropout_p,
303
- has_x1,
304
- has_residual,
305
- is_rms_norm,
306
- )
307
- # dresidualmat is None if not has_residual
308
- return dx0mat, dx1mat, dresidualmat, dgamma0, dbeta0, dgamma1, dbeta1
309
-
310
-
311
- class DropoutAddLayerNormFn(torch.autograd.Function):
312
- @staticmethod
313
- def forward(
314
- ctx,
315
- x0,
316
- residual,
317
- gamma,
318
- beta,
319
- rowscale,
320
- colscale,
321
- dropout_p,
322
- epsilon,
323
- residual_in_fp32=False,
324
- prenorm=False,
325
- is_rms_norm=False,
326
- return_dmask=False,
327
- ):
328
- x0 = maybe_align(x0.contiguous(), 16)
329
- residual = maybe_align(residual.contiguous(), 16) if residual is not None else None
330
- gamma = maybe_align(gamma.contiguous(), 16)
331
- beta = maybe_align(beta.contiguous(), 16) if beta is not None else None
332
- rowscale = maybe_align(rowscale.contiguous(), 16) if rowscale is not None else None
333
- colscale = maybe_align(colscale.contiguous(), 16) if colscale is not None else None
334
- zmat, xmat, dmask, mu, rsigma = _dropout_add_layer_norm_forward(
335
- x0,
336
- residual,
337
- gamma,
338
- beta,
339
- rowscale,
340
- colscale,
341
- dropout_p,
342
- epsilon,
343
- residual_in_fp32,
344
- is_rms_norm,
345
- )
346
- # Only need to save x0 if we need to compute gradient wrt colscale
347
- x0_saved = x0 if colscale is not None else None
348
- ctx.save_for_backward(
349
- xmat.view(x0.shape), x0_saved, dmask, gamma, mu, rsigma, rowscale, colscale
350
- )
351
- ctx.prenorm = prenorm
352
- ctx.dropout_p = dropout_p
353
- ctx.has_residual = residual is not None
354
- ctx.is_rms_norm = is_rms_norm
355
- ctx.has_beta = beta is not None
356
- if not return_dmask:
357
- return (
358
- zmat.view(x0.shape) if not prenorm else (zmat.view(x0.shape), xmat.view(x0.shape))
359
- )
360
- else:
361
- dmask = (
362
- dmask.view(x0.shape)
363
- if dropout_p > 0.0
364
- else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
365
- )
366
- ctx.mark_non_differentiable(dmask)
367
- return (
368
- (zmat.view(x0.shape), dmask)
369
- if not prenorm
370
- else (zmat.view(x0.shape), xmat.view(x0.shape), dmask)
371
- )
372
-
373
- @staticmethod
374
- def backward(ctx, dz, *args):
375
- # assert dz.is_contiguous()
376
- dz = maybe_align(dz.contiguous(), 16) # this happens!
377
- dx = maybe_align(args[0].contiguous(), 16) if ctx.prenorm else None
378
- x, x0, dmask, gamma, mu, rsigma, rowscale, colscale = ctx.saved_tensors
379
- # x0 is None if colscale is None
380
- dropout_p = ctx.dropout_p
381
- has_residual = ctx.has_residual
382
- dx0mat, dresidualmat, dgamma, dbeta, *rest = _dropout_add_layer_norm_backward(
383
- dz,
384
- dx,
385
- x,
386
- x0,
387
- dmask,
388
- mu,
389
- rsigma,
390
- gamma,
391
- rowscale,
392
- colscale,
393
- dropout_p,
394
- has_residual,
395
- ctx.is_rms_norm,
396
- )
397
- dx0 = dx0mat.view(x.shape)
398
- dresidual = dresidualmat.view(x.shape) if dresidualmat is not None else None
399
- dcolscale = rest[0] if colscale is not None else None
400
- return (
401
- dx0,
402
- dresidual,
403
- dgamma,
404
- dbeta if ctx.has_beta else None,
405
- None,
406
- dcolscale,
407
- None,
408
- None,
409
- None,
410
- None,
411
- None,
412
- None,
413
- )
414
-
415
-
416
- class DropoutAddLayerNormSubsetFn(torch.autograd.Function):
417
- @staticmethod
418
- def forward(
419
- ctx,
420
- x0,
421
- residual,
422
- gamma,
423
- beta,
424
- colscale,
425
- x0_subset,
426
- out_subset,
427
- dropout_p,
428
- epsilon,
429
- rowscale_const,
430
- out_numrows,
431
- residual_in_fp32=False,
432
- prenorm=False,
433
- is_rms_norm=False,
434
- return_dmask=False,
435
- ):
436
- x0 = maybe_align(x0.contiguous(), 16)
437
- residual = maybe_align(residual.contiguous(), 16) if residual is not None else None
438
- gamma = maybe_align(gamma.contiguous(), 16)
439
- beta = maybe_align(beta.contiguous(), 16) if beta is not None else None
440
- colscale = maybe_align(colscale.contiguous(), 16) if colscale is not None else None
441
- zmat, xmat, dmask, mu, rsigma = _dropout_add_layer_norm_subset_forward(
442
- x0,
443
- residual,
444
- gamma,
445
- beta,
446
- colscale,
447
- x0_subset,
448
- out_subset,
449
- dropout_p,
450
- epsilon,
451
- rowscale_const,
452
- out_numrows,
453
- residual_in_fp32,
454
- is_rms_norm,
455
- )
456
- # Only need to save x0 if we need to compute gradient wrt colscale
457
- x0_saved = x0 if colscale is not None else None
458
- x_shape = (-1, *x0.shape[1:])
459
- ctx.save_for_backward(
460
- xmat.view(x_shape), x0_saved, dmask, gamma, mu, rsigma, colscale, x0_subset, out_subset
461
- )
462
- ctx.prenorm = prenorm
463
- ctx.dropout_p = dropout_p
464
- ctx.rowscale_const = rowscale_const
465
- ctx.x0_numrows = x0.shape[:-1].numel()
466
- ctx.has_residual = residual is not None
467
- ctx.is_rms_norm = is_rms_norm
468
- ctx.has_beta = beta is not None
469
- z_shape = (-1, *x0.shape[1:])
470
- if not return_dmask:
471
- return zmat.view(z_shape) if not prenorm else (zmat.view(z_shape), xmat.view(x0.shape))
472
- else:
473
- z = zmat.view(z_shape)
474
- dmask = (
475
- dmask.view(x0.shape)
476
- if dropout_p > 0.0
477
- else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
478
- )
479
- ctx.mark_non_differentiable(dmask)
480
- return (z, dmask) if not prenorm else (z, xmat.view(x_shape), dmask)
481
-
482
- @staticmethod
483
- def backward(ctx, dz, *args):
484
- # assert dz.is_contiguous()
485
- dz = maybe_align(dz.contiguous(), 16) # this happens!
486
- dx = maybe_align(args[0].contiguous(), 16) if ctx.prenorm else None
487
- x, x0, dmask, gamma, mu, rsigma, colscale, x0_subset, out_subset = ctx.saved_tensors
488
- # x0 is None if colscale is None
489
- dropout_p = ctx.dropout_p
490
- has_residual = ctx.has_residual
491
- dx0mat, dresidualmat, dgamma, dbeta, *rest = _dropout_add_layer_norm_subset_backward(
492
- dz,
493
- dx,
494
- x,
495
- x0,
496
- dmask,
497
- mu,
498
- rsigma,
499
- gamma,
500
- colscale,
501
- x0_subset,
502
- out_subset,
503
- dropout_p,
504
- ctx.rowscale_const,
505
- ctx.x0_numrows,
506
- has_residual,
507
- ctx.is_rms_norm,
508
- )
509
- dx0 = dx0mat.view(-1, *x.shape[1:])
510
- dresidual = dresidualmat.view(x.shape) if dresidualmat is not None else None
511
- dcolscale = rest[0] if colscale is not None else None
512
- return (
513
- dx0,
514
- dresidual,
515
- dgamma,
516
- dbeta if ctx.has_beta else None,
517
- dcolscale,
518
- None,
519
- None,
520
- None,
521
- None,
522
- None,
523
- None,
524
- None,
525
- None,
526
- None,
527
- None,
528
- )
529
-
530
-
531
- class DropoutAddLayerNormParallelResidualFn(torch.autograd.Function):
532
- @staticmethod
533
- def forward(
534
- ctx,
535
- x0,
536
- x1,
537
- residual,
538
- gamma0,
539
- beta0,
540
- gamma1,
541
- beta1,
542
- dropout_p,
543
- epsilon,
544
- residual_in_fp32=False,
545
- prenorm=False,
546
- is_rms_norm=False,
547
- return_dmask=False,
548
- ):
549
- x0 = maybe_align(x0.contiguous(), 16)
550
- x1 = maybe_align(x1.contiguous(), 16) if x1 is not None else None
551
- residual = maybe_align(residual.contiguous(), 16) if residual is not None else None
552
- gamma0 = maybe_align(gamma0.contiguous(), 16)
553
- beta0 = maybe_align(beta0.contiguous(), 16) if beta0 is not None else None
554
- gamma1 = maybe_align(gamma1.contiguous(), 16) if gamma1 is not None else None
555
- beta1 = maybe_align(beta1.contiguous(), 16) if beta1 is not None else None
556
- (
557
- z0mat,
558
- z1mat,
559
- xmat,
560
- dmask0,
561
- dmask1,
562
- mu,
563
- rsigma,
564
- ) = _dropout_add_layer_norm_parallel_residual_forward(
565
- x0,
566
- x1,
567
- residual,
568
- gamma0,
569
- beta0,
570
- gamma1,
571
- beta1,
572
- dropout_p,
573
- epsilon,
574
- residual_in_fp32,
575
- is_rms_norm,
576
- )
577
- ctx.save_for_backward(xmat.view(x0.shape), dmask0, dmask1, gamma0, gamma1, mu, rsigma)
578
- ctx.prenorm = prenorm
579
- ctx.dropout_p = dropout_p
580
- ctx.has_x1 = x1 is not None
581
- ctx.has_residual = residual is not None
582
- ctx.is_rms_norm = is_rms_norm
583
- ctx.has_beta = beta0 is not None
584
- z = (z0mat.view(x0.shape), z1mat.view(x0.shape) if z1mat is not None else None)
585
- if not return_dmask:
586
- return z if not prenorm else (*z, xmat.view(x0.shape))
587
- else:
588
- dmask0 = (
589
- dmask0.view(x0.shape)
590
- if dropout_p > 0.0
591
- else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
592
- )
593
- dmask1 = (
594
- dmask1.view(x0.shape)
595
- if dropout_p > 0.0 and x1 is not None
596
- else torch.ones(x0.shape, dtype=torch.uint8, device=x0.device)
597
- )
598
- ctx.mark_non_differentiable(dmask0)
599
- ctx.mark_non_differentiable(dmask1)
600
- return (
601
- (*z, dmask0, dmask1) if not prenorm else (*z, xmat.view(x0.shape), dmask0, dmask1)
602
- )
603
-
604
- @staticmethod
605
- def backward(ctx, dz0, dz1, *args):
606
- dz0 = maybe_align(dz0.contiguous(), 16) # this happens!
607
- dz1 = maybe_align(dz1.contiguous(), 16) if dz1 is not None else None
608
- dx = maybe_align(args[0].contiguous(), 16) if ctx.prenorm else None
609
- x, dmask0, dmask1, gamma0, gamma1, mu, rsigma = ctx.saved_tensors
610
- dropout_p = ctx.dropout_p
611
- has_x1 = ctx.has_x1
612
- has_residual = ctx.has_residual
613
- (
614
- dx0mat,
615
- dx1mat,
616
- dresidualmat,
617
- dgamma0,
618
- dbeta0,
619
- dgamma1,
620
- dbeta1,
621
- ) = _dropout_add_layer_norm_parallel_residual_backward(
622
- dz0,
623
- dz1,
624
- dx,
625
- x,
626
- dmask0,
627
- dmask1,
628
- mu,
629
- rsigma,
630
- gamma0,
631
- gamma1,
632
- dropout_p,
633
- has_x1,
634
- has_residual,
635
- ctx.is_rms_norm,
636
- )
637
- dx0 = dx0mat.view(x.shape)
638
- dx1 = dx1mat.view(x.shape) if dx1mat is not None else None
639
- dresidual = dresidualmat.view(x.shape) if dresidualmat is not None else None
640
- return (
641
- dx0,
642
- dx1,
643
- dresidual,
644
- dgamma0,
645
- dbeta0 if ctx.has_beta else None,
646
- dgamma1,
647
- dbeta1 if ctx.has_beta else None,
648
- None,
649
- None,
650
- None,
651
- None,
652
- None,
653
- None,
654
- )
655
-
656
-
657
- def layer_norm(x, weight, bias, epsilon):
658
- return DropoutAddLayerNormFn.apply(x, None, weight, bias, None, None, 0.0, epsilon, False)
659
-
660
-
661
- def dropout_add_layer_norm(
662
- x0,
663
- residual,
664
- weight,
665
- bias,
666
- dropout_p,
667
- epsilon,
668
- rowscale=None,
669
- layerscale=None,
670
- prenorm=False,
671
- residual_in_fp32=False,
672
- return_dropout_mask=False,
673
- ):
674
- """residual_in_fp32 only has an effect if residual is None.
675
- Otherwise residual dtype is residual.dtype.
676
- """
677
- return DropoutAddLayerNormFn.apply(
678
- x0,
679
- residual,
680
- weight,
681
- bias,
682
- rowscale,
683
- layerscale,
684
- dropout_p,
685
- epsilon,
686
- residual_in_fp32,
687
- prenorm,
688
- False,
689
- return_dropout_mask,
690
- )
691
-
692
-
693
- def dropout_add_layer_norm_subset(
694
- x0,
695
- residual,
696
- weight,
697
- bias,
698
- dropout_p,
699
- epsilon,
700
- layerscale=None,
701
- x0_subset=None,
702
- out_subset=None,
703
- rowscale_const=1.0,
704
- out_numrows=0,
705
- prenorm=False,
706
- residual_in_fp32=False,
707
- return_dropout_mask=False,
708
- ):
709
- """residual_in_fp32 only has an effect if residual is None.
710
- Otherwise residual dtype is residual.dtype.
711
- """
712
- return DropoutAddLayerNormSubsetFn.apply(
713
- x0,
714
- residual,
715
- weight,
716
- bias,
717
- layerscale,
718
- x0_subset,
719
- out_subset,
720
- dropout_p,
721
- epsilon,
722
- rowscale_const,
723
- out_numrows,
724
- residual_in_fp32,
725
- prenorm,
726
- False,
727
- return_dropout_mask,
728
- )
729
-
730
-
731
- def dropout_add_layer_norm_parallel_residual(
732
- x0,
733
- x1,
734
- residual,
735
- weight0,
736
- bias0,
737
- weight1,
738
- bias1,
739
- dropout_p,
740
- epsilon,
741
- prenorm=False,
742
- residual_in_fp32=False,
743
- return_dropout_mask=False,
744
- ):
745
- """residual_in_fp32 only has an effect if residual is None.
746
- Otherwise residual dtype is residual.dtype.
747
- """
748
- return DropoutAddLayerNormParallelResidualFn.apply(
749
- x0,
750
- x1,
751
- residual,
752
- weight0,
753
- bias0,
754
- weight1,
755
- bias1,
756
- dropout_p,
757
- epsilon,
758
- residual_in_fp32,
759
- prenorm,
760
- False,
761
- return_dropout_mask,
762
- )
763
-
764
-
765
- class DropoutAddLayerNorm(torch.nn.Module):
766
- def __init__(
767
- self,
768
- hidden_size,
769
- prenorm=False,
770
- p=0.0,
771
- eps=1e-5,
772
- residual_in_fp32=False,
773
- device=None,
774
- dtype=None,
775
- ):
776
- factory_kwargs = {"device": device, "dtype": dtype}
777
- super().__init__()
778
- self.prenorm = prenorm
779
- self.p = p
780
- self.eps = eps
781
- self.residual_in_fp32 = residual_in_fp32
782
- self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
783
- self.bias = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
784
- self.reset_parameters()
785
-
786
- def reset_parameters(self):
787
- init.ones_(self.weight)
788
- init.zeros_(self.bias)
789
-
790
- def forward(self, x0, residual=None):
791
- return dropout_add_layer_norm(
792
- x0,
793
- residual,
794
- self.weight,
795
- self.bias,
796
- self.p if self.training else 0.0,
797
- self.eps,
798
- prenorm=self.prenorm,
799
- residual_in_fp32=self.residual_in_fp32,
800
- )
 
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/rms_norm.py DELETED
@@ -1,174 +0,0 @@
1
- # Copyright (c) 2022, Tri Dao.
2
- # Adapted from https://github.com/NVIDIA/apex/blob/master/apex/contrib/layer_norm/layer_norm.py
3
-
4
- import torch
5
- from torch.nn import init
6
-
7
- from flash_attn.ops.layer_norm import (
8
- DropoutAddLayerNormFn,
9
- DropoutAddLayerNormParallelResidualFn,
10
- DropoutAddLayerNormSubsetFn,
11
- )
12
-
13
-
14
- def rms_norm(x, weight, epsilon):
15
- return DropoutAddLayerNormFn.apply(
16
- x, None, weight, None, None, None, 0.0, epsilon, False, False, True
17
- )
18
-
19
-
20
- def dropout_add_rms_norm(
21
- x0,
22
- residual,
23
- weight,
24
- bias,
25
- dropout_p,
26
- epsilon,
27
- rowscale=None,
28
- layerscale=None,
29
- prenorm=False,
30
- residual_in_fp32=False,
31
- return_dropout_mask=False,
32
- ):
33
- """residual_in_fp32 only has an effect if residual is None.
34
- Otherwise residual dtype is residual.dtype.
35
- """
36
- return DropoutAddLayerNormFn.apply(
37
- x0,
38
- residual,
39
- weight,
40
- bias,
41
- rowscale,
42
- layerscale,
43
- dropout_p,
44
- epsilon,
45
- residual_in_fp32,
46
- prenorm,
47
- True,
48
- return_dropout_mask,
49
- )
50
-
51
-
52
- def dropout_add_rms_norm_subset(
53
- x0,
54
- residual,
55
- weight,
56
- bias,
57
- dropout_p,
58
- epsilon,
59
- layerscale=None,
60
- x0_subset=None,
61
- out_subset=None,
62
- rowscale_const=1.0,
63
- out_numrows=0,
64
- prenorm=False,
65
- residual_in_fp32=False,
66
- return_dropout_mask=False,
67
- ):
68
- """residual_in_fp32 only has an effect if residual is None.
69
- Otherwise residual dtype is residual.dtype.
70
- """
71
- return DropoutAddLayerNormSubsetFn.apply(
72
- x0,
73
- residual,
74
- weight,
75
- bias,
76
- layerscale,
77
- x0_subset,
78
- out_subset,
79
- dropout_p,
80
- epsilon,
81
- rowscale_const,
82
- out_numrows,
83
- residual_in_fp32,
84
- prenorm,
85
- True,
86
- return_dropout_mask,
87
- )
88
-
89
-
90
- def dropout_add_rms_norm_parallel_residual(
91
- x0,
92
- x1,
93
- residual,
94
- weight0,
95
- bias0,
96
- weight1,
97
- bias1,
98
- dropout_p,
99
- epsilon,
100
- prenorm=False,
101
- residual_in_fp32=False,
102
- return_dropout_mask=False,
103
- ):
104
- """residual_in_fp32 only has an effect if residual is None.
105
- Otherwise residual dtype is residual.dtype.
106
- """
107
- return DropoutAddLayerNormParallelResidualFn.apply(
108
- x0,
109
- x1,
110
- residual,
111
- weight0,
112
- bias0,
113
- weight1,
114
- bias1,
115
- dropout_p,
116
- epsilon,
117
- residual_in_fp32,
118
- prenorm,
119
- True,
120
- return_dropout_mask,
121
- )
122
-
123
-
124
- class RMSNorm(torch.nn.Module):
125
- def __init__(self, hidden_size, eps=1e-5, device=None, dtype=None):
126
- factory_kwargs = {"device": device, "dtype": dtype}
127
- super().__init__()
128
- self.eps = eps
129
- self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
130
- self.register_parameter("bias", None)
131
- self.reset_parameters()
132
-
133
- def reset_parameters(self):
134
- init.ones_(self.weight)
135
-
136
- def forward(self, x):
137
- return rms_norm(x, self.weight, self.eps)
138
-
139
-
140
- class DropoutAddRMSNorm(torch.nn.Module):
141
- def __init__(
142
- self,
143
- hidden_size,
144
- prenorm=False,
145
- p=0.0,
146
- eps=1e-5,
147
- residual_in_fp32=False,
148
- device=None,
149
- dtype=None,
150
- ):
151
- factory_kwargs = {"device": device, "dtype": dtype}
152
- super().__init__()
153
- self.prenorm = prenorm
154
- self.p = p
155
- self.eps = eps
156
- self.residual_in_fp32 = residual_in_fp32
157
- self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
158
- self.register_parameter("bias", None)
159
- self.reset_parameters()
160
-
161
- def reset_parameters(self):
162
- init.ones_(self.weight)
163
-
164
- def forward(self, x0, residual=None):
165
- return dropout_add_rms_norm(
166
- x0,
167
- residual,
168
- self.weight,
169
- None,
170
- self.p if self.training else 0.0,
171
- self.eps,
172
- prenorm=self.prenorm,
173
- residual_in_fp32=self.residual_in_fp32,
174
- )
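
For reference, a minimal usage sketch of the RMSNorm wrappers defined in the deleted file above. Illustrative only: these reuse the fused `DropoutAddLayerNormFn` path with `is_rms_norm=True`, so a CUDA device and the same extension are required; sizes are placeholders.

```python
# Illustrative sketch only; RMSNorm here is the fused CUDA path (is_rms_norm=True),
# so it needs a CUDA device and the dropout_layer_norm extension.
import torch
from flash_attn.ops.rms_norm import RMSNorm, dropout_add_rms_norm

norm = RMSNorm(1024, eps=1e-5, device="cuda", dtype=torch.float16)
x = torch.randn(2, 64, 1024, device="cuda", dtype=torch.float16)
y = norm(x)  # same shape as x; no bias term, weight initialized to ones

# The functional form additionally fuses dropout + residual add:
residual = torch.randn_like(x)
out, new_residual = dropout_add_rms_norm(
    x, residual, norm.weight, None,  # RMSNorm has no bias, so bias=None
    dropout_p=0.0, epsilon=1e-5, prenorm=True,
)
```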
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/__init__.py DELETED
@@ -1 +0,0 @@
1
-
 
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/cross_entropy.py DELETED
@@ -1,330 +0,0 @@
1
- # Copyright (c) 2023, Tri Dao.
2
-
3
- from typing import Tuple, Optional, Union
4
-
5
- import torch
6
- import torch.nn.functional as F
7
-
8
- import triton
9
- import triton.language as tl
10
-
11
- # `all_gather_into_tensor` and `reduce_scatter_tensor` are new placeholders for
12
- # `_all_gather_base` and `_reduce_scatter_base`. They require the most recent
13
- # version of PyTorch. The following 2 lines are for backward compatibility with
14
- # older PyTorch.
15
- if "all_gather_into_tensor" not in dir(torch.distributed):
16
- torch.distributed.all_gather_into_tensor = torch.distributed._all_gather_base
17
-
18
-
19
- @triton.heuristics(
20
- {
21
- "HAS_SMOOTHING": lambda args: args["smoothing"] > 0.0,
22
- }
23
- )
24
- @triton.jit
25
- def cross_entropy_fwd_kernel(
26
- loss_ptr, # data ptrs
27
- lse_ptr,
28
- z_loss_ptr,
29
- logits_ptr,
30
- labels_ptr,
31
- smoothing,
32
- logit_scale,
33
- lse_square_scale,
34
- ignore_index,
35
- total_classes,
36
- class_start_idx, # Useful for tensor parallel when each rank only has a subset of classes
37
- n_cols, # shapes
38
- logits_row_stride, # strides
39
- BLOCK_SIZE: tl.constexpr,
40
- HAS_SMOOTHING: tl.constexpr,
41
- # if SPLIT (e.g. tensor parallel), don't include the LSE in the loss since it's not the final LSE
42
- SPLIT: tl.constexpr,
43
- PRECOMPUTED_LSE: tl.constexpr, # If LSE is already computed (also no smoothing and logit_scale == 1.0)
44
- ):
45
- row_idx = tl.program_id(0)
46
- logits_ptr = logits_ptr + row_idx * logits_row_stride.to(tl.int64)
47
- sum_logits = 0.0 # For smoothing
48
- if not PRECOMPUTED_LSE:
49
- # Statistics for online softmax
50
- m_i = -float("inf")
51
- l_i = 0.0
52
- for col_offset in range(0, n_cols, BLOCK_SIZE):
53
- cols = col_offset + tl.arange(0, BLOCK_SIZE)
54
- logits = tl.load(logits_ptr + cols, mask=cols < n_cols, other=-float("inf")).to(
55
- tl.float32
56
- ) * logit_scale
57
- if HAS_SMOOTHING:
58
- sum_logits += tl.sum(tl.where(cols < n_cols, logits, 0.0))
59
- m_i_new = tl.maximum(m_i, tl.max(logits))
60
- l_i = tl.exp(m_i - m_i_new) * l_i + tl.sum(tl.exp(logits - m_i_new))
61
- m_i = m_i_new
62
- lse = tl.log(l_i) + m_i
63
- tl.store(lse_ptr + row_idx, lse)
64
- else:
65
- lse = tl.load(lse_ptr + row_idx)
66
- label_idx = tl.load(labels_ptr + row_idx)
67
- if label_idx == ignore_index:
68
- loss = 0.0
69
- z_loss = 0.0
70
- else:
71
- label_idx -= class_start_idx
72
- if label_idx >= 0 and label_idx < n_cols:
73
- logits_label = tl.load(logits_ptr + label_idx) * logit_scale
74
- if HAS_SMOOTHING:
75
- loss = (
76
- (lse if not SPLIT else 0.0)
77
- - smoothing * sum_logits / total_classes
78
- - (1 - smoothing) * logits_label
79
- )
80
- else:
81
- loss = (lse if not SPLIT else 0.0) - logits_label
82
- else:
83
- # If label is out of bounds, we set the CE loss to 0.0. But we still want the smoothing loss
84
- if HAS_SMOOTHING:
85
- loss = smoothing * ((lse if not SPLIT else 0.0) - sum_logits / total_classes)
86
- else:
87
- loss = 0.0
88
- if not SPLIT:
89
- z_loss = lse_square_scale * lse * lse
90
- loss += z_loss
91
- else:
92
- z_loss = 0.0
93
- tl.store(loss_ptr + row_idx, loss)
94
- if not SPLIT:
95
- tl.store(z_loss_ptr + row_idx, z_loss)
96
-
97
-
98
- @triton.heuristics(
99
- {
100
- "HAS_SMOOTHING": lambda args: args["smoothing"] > 0.0,
101
- }
102
- )
103
- @triton.jit
104
- def cross_entropy_bwd_kernel(
105
- dlogits_ptr, # data ptrs
106
- dloss_ptr,
107
- logits_ptr,
108
- lse_ptr,
109
- labels_ptr,
110
- smoothing,
111
- logit_scale,
112
- lse_square_scale,
113
- ignore_index,
114
- total_classes,
115
- class_start_idx, # Useful for tensor parallel when each rank only has a subset of classes
116
- n_cols, # shapes
117
- logits_row_stride, # strides
118
- dlogits_row_stride,
119
- dloss_row_stride,
120
- BLOCK_SIZE: tl.constexpr,
121
- HAS_SMOOTHING: tl.constexpr,
122
- ):
123
- row_idx = tl.program_id(0)
124
- col_block_idx = tl.program_id(1)
125
- logits_ptr = logits_ptr + row_idx * logits_row_stride.to(tl.int64)
126
- dlogits_ptr = dlogits_ptr + row_idx * dlogits_row_stride.to(tl.int64)
127
- col_offsets = col_block_idx * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
128
- label_idx = tl.load(labels_ptr + row_idx)
129
- if label_idx != ignore_index:
130
- dloss = tl.load(dloss_ptr + row_idx * dloss_row_stride)
131
- else:
132
- dloss = 0.0
133
- logits = tl.load(logits_ptr + col_offsets, mask=col_offsets < n_cols, other=-float("inf")).to(
134
- tl.float32
135
- ) * logit_scale
136
- lse = tl.load(lse_ptr + row_idx)
137
- probs = tl.exp(logits - lse)
138
- probs += 2.0 * lse_square_scale * lse * probs
139
- label_idx -= class_start_idx
140
- if HAS_SMOOTHING:
141
- smooth_positive = 1.0 - smoothing
142
- smooth_negative = smoothing / total_classes
143
- probs = tl.where(col_offsets == label_idx, probs - smooth_positive, probs) - smooth_negative
144
- else:
145
- probs = tl.where(col_offsets == label_idx, probs - 1.0, probs)
146
- tl.store(dlogits_ptr + col_offsets, (dloss * logit_scale) * probs, mask=col_offsets < n_cols)
147
-
148
-
149
- class CrossEntropyLoss(torch.autograd.Function):
150
-
151
- @staticmethod
152
- def forward(
153
- ctx,
154
- logits,
155
- labels,
156
- precomputed_lse=None,
157
- smoothing=0.0,
158
- logit_scale=1.0,
159
- lse_square_scale=0.0,
160
- ignore_index=-100,
161
- inplace_backward=False,
162
- process_group=None,
163
- ):
164
- # For some reason Triton generates wrong code when labels has dtype long and its address
165
- # is not aligned to 16 bytes. The ld.global.b64 seems to load the wrong label index.
166
- if labels.dtype == torch.long and labels.data_ptr() % 16 != 0:
167
- labels = F.pad(labels, (0, 1))[..., :-1]
168
- assert labels.data_ptr() % 16 == 0
169
- assert logit_scale > 0.0
170
- n_rows, n_cols = logits.shape
171
- assert labels.shape == (n_rows,)
172
- world_size = 1 if process_group is None else torch.distributed.get_world_size(process_group)
173
- total_classes = world_size * n_cols
174
- rank = 0 if process_group is None else torch.distributed.get_rank(process_group)
175
- class_start_idx = rank * n_cols
176
- use_precomputed_lse = precomputed_lse is not None and logit_scale == 1.0 and smoothing == 0.0
177
-
178
- if logits.stride(-1) != 1:
179
- logits = logits.contiguous()
180
- MAX_BLOCK_SIZE = 16 * 1024
181
- BLOCK_SIZE = min(triton.next_power_of_2(n_cols), MAX_BLOCK_SIZE)
182
- num_warps = (
183
- 4
184
- if BLOCK_SIZE < 2048
185
- else (8 if BLOCK_SIZE < 8192 else (16 if BLOCK_SIZE < 128 * 1024 else 32))
186
- )
187
- losses = torch.empty(n_rows, dtype=torch.float, device=logits.device)
188
- if use_precomputed_lse:
189
- assert precomputed_lse.shape == (n_rows,)
190
- lse = precomputed_lse.contiguous()
191
- else:
192
- lse = torch.empty(n_rows, dtype=torch.float, device=logits.device)
193
- z_losses = torch.empty(n_rows, dtype=torch.float, device=logits.device)
194
- # Need this, otherwise Triton tries to launch from cuda:0 and we get
195
- # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?)
196
- with torch.cuda.device(logits.device.index):
197
- cross_entropy_fwd_kernel[(n_rows,)](
198
- losses, # data ptrs
199
- lse,
200
- z_losses,
201
- logits,
202
- labels,
203
- smoothing,
204
- logit_scale,
205
- lse_square_scale,
206
- ignore_index,
207
- total_classes,
208
- class_start_idx,
209
- n_cols, # shapes
210
- logits.stride(0), # strides
211
- BLOCK_SIZE=BLOCK_SIZE, # constants
212
- SPLIT=world_size > 1,
213
- PRECOMPUTED_LSE=use_precomputed_lse,
214
- num_warps=num_warps,
215
- )
216
-
217
- if world_size > 1:
218
- # If there's no smoothing, if labels are in the vocab of this partition, losses contains
219
- # - predicted logit, and 0 otherwise.
220
- # If there's smoothing=0.1, for labels in the vocab of this partition, losses contains
221
- # -0.9 * predicted logit - 0.1 * sum logit / total_classes.
222
- # For labels not in the vocab of this partition, losses contains
223
- # -0.1 * sum logit / total_classes.
224
- if world_size > 1:
225
- lse_allgather = torch.empty(world_size, n_rows, dtype=lse.dtype, device=lse.device)
226
- torch.distributed.all_gather_into_tensor(lse_allgather, lse, group=process_group)
227
- handle_losses = torch.distributed.all_reduce(
228
- losses, op=torch.distributed.ReduceOp.SUM, group=process_group, async_op=True
229
- )
230
- lse = torch.logsumexp(lse_allgather, dim=0)
231
- handle_losses.wait()
232
- # After the allreduce, if there's no smoothing, the total losses are - predicted_logit,
233
- # we just have to add the (global) lse.
234
- # If there's smoothing=0.1, the total losses are
235
- # -0.9 * predicted_logit - 0.1 * sum logit / total_classes.
236
- # Again, we just have to add the (global) lse.
237
- losses += lse
238
- if lse_square_scale != 0.0:
239
- z_losses = lse_square_scale * lse.square()
240
- z_losses.masked_fill_(labels == ignore_index, 0.0)
241
- losses += z_losses
242
- else:
243
- z_losses = torch.zeros_like(losses)
244
- losses.masked_fill_(labels == ignore_index, 0.0)
245
-
246
- ctx.save_for_backward(logits, lse, labels)
247
- ctx.mark_non_differentiable(z_losses)
248
- ctx.smoothing = smoothing
249
- ctx.logit_scale = logit_scale
250
- ctx.lse_square_scale = lse_square_scale
251
- ctx.ignore_index = ignore_index
252
- ctx.total_classes = total_classes
253
- ctx.class_start_idx = class_start_idx
254
- ctx.inplace_backward = inplace_backward
255
- return losses, z_losses
256
-
257
- @staticmethod
258
- def backward(ctx, grad_losses, grad_z_losses):
259
- del grad_z_losses # z_losses are only for logging.
260
-
261
- logits, lse, labels = ctx.saved_tensors
262
- dlogits = logits if ctx.inplace_backward else torch.empty_like(logits)
263
- n_rows, n_cols = logits.shape
264
- BLOCK_SIZE = min(triton.next_power_of_2(n_cols), 4 * 1024)
265
- num_warps = 4 if BLOCK_SIZE < 2048 else (8 if BLOCK_SIZE < 8192 else 16)
266
- grid = lambda META: (n_rows, triton.cdiv(n_cols, META["BLOCK_SIZE"])) # noqa
267
- # Need this, otherwise Triton tries to launch from cuda:0 and we get
268
- # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?)
269
- with torch.cuda.device(logits.device.index):
270
- cross_entropy_bwd_kernel[grid](
271
- dlogits, # data ptrs
272
- grad_losses,
273
- logits,
274
- lse,
275
- labels,
276
- ctx.smoothing,
277
- ctx.logit_scale,
278
- ctx.lse_square_scale,
279
- ctx.ignore_index,
280
- ctx.total_classes,
281
- ctx.class_start_idx,
282
- n_cols, # shapes
283
- logits.stride(0), # strides
284
- dlogits.stride(0),
285
- grad_losses.stride(0),
286
- BLOCK_SIZE=BLOCK_SIZE, # constants
287
- num_warps=num_warps,
288
- )
289
- return dlogits, None, None, None, None, None, None, None, None, None
290
-
291
-
292
- def cross_entropy_loss(
293
- logits: torch.Tensor,
294
- labels: torch.Tensor,
295
- precomputed_lse: Optional[torch.Tensor] = None,
296
- label_smoothing: float = 0.0,
297
- logit_scale: float = 1.0,
298
- lse_square_scale: float = 0.0,
299
- ignore_index=-100,
300
- inplace_backward: bool = False,
301
- process_group=None,
302
- ) -> Tuple[torch.Tensor, torch.Tensor]:
303
- """
304
- Arguments:
305
- logits: (batch, vocab_size)
306
- labels: (batch,)
307
- label_smoothing: float
308
- logit_scale: float. Multiply logits by this scale before calculating the loss.
309
- lse_square_scale: float. If > 0, we add lse_square_scale * lse(logits) ^ 2 to the loss.
310
- This is also referred to as "z-loss".
311
- ignore_index: int. If labels == ignore_index, the loss is set to 0.0.
312
- inplace_backward: bool. If True, we do the backward pass in-place by modifying the logits.
313
- This saves memory.
314
- process_group: if not None, we're doing Tensor Parallel: each process is responsible for
315
- one part of the vocab. The loss will be aggregated across processes.
316
- Returns:
317
- losses: (batch,), float
318
- z_losses: (batch,), float
319
- """
320
- return CrossEntropyLoss.apply(
321
- logits,
322
- labels,
323
- precomputed_lse,
324
- label_smoothing,
325
- logit_scale,
326
- lse_square_scale,
327
- ignore_index,
328
- inplace_backward,
329
- process_group,
330
- )
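
For reference, a minimal usage sketch of the Triton cross-entropy loss defined in the deleted file above. Illustrative only: it needs a CUDA device and Triton, and the vocabulary size and hyperparameters below are placeholders.

```python
# Illustrative sketch only; the loss is computed by the Triton kernels defined
# in the deleted file, so a CUDA device and Triton are required.
import torch
from flash_attn.ops.triton.cross_entropy import cross_entropy_loss

vocab = 50257
logits = torch.randn(32, vocab, device="cuda", dtype=torch.float16, requires_grad=True)
labels = torch.randint(0, vocab, (32,), device="cuda")

losses, z_losses = cross_entropy_loss(
    logits, labels,
    label_smoothing=0.0,
    lse_square_scale=1e-4,   # adds the auxiliary "z-loss" term (returned for logging)
    inplace_backward=True,   # reuse the logits buffer for dlogits to save memory
)
losses.mean().backward()     # per-example losses; z_losses is non-differentiable
```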
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/k_activations.py DELETED
@@ -1,162 +0,0 @@
1
- # Adapted from https://github.com/facebookresearch/xformers/blob/main/xformers/triton/k_activations.py
2
- # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
3
- #
4
- # This source code is licensed under the BSD license found in the
5
- # LICENSE file in the root directory of this source tree.
6
-
7
- import math
8
- from enum import Enum
9
- from typing import Optional
10
-
11
- import triton
12
- import triton.language as tl
13
-
14
- _sqrt2pi = math.sqrt(2.0 / math.pi)
15
- _sqrt1_2 = math.sqrt(1.0 / 2)
16
- _gaussian_pdf_normalization = 1.0 / math.sqrt(2 * math.pi)
17
-
18
-
19
- class Activation(str, Enum):
20
- SquaredReLU = "squared_relu"
21
- GeLU = "gelu"
22
- GeLUApprox = "gelu_approx"
23
- LeakyReLU = "leaky_relu"
24
- ReLU = "relu"
25
-
26
-
27
- def get_triton_activation_kernel(activation: Optional[Activation]):
28
- return (
29
- {
30
- Activation.ReLU: relu,
31
- Activation.LeakyReLU: leaky_relu,
32
- Activation.GeLU: gelu,
33
- Activation.GeLUApprox: gelu_approx,
34
- Activation.SquaredReLU: squared_relu,
35
- }[activation]
36
- if activation
37
- else None
38
- )
39
-
40
-
41
- def get_triton_activation_bwd_kernel(activation: Optional[Activation]):
42
- return (
43
- {
44
- Activation.ReLU: relu_grad,
45
- Activation.LeakyReLU: leaky_relu_grad,
46
- Activation.GeLU: gelu_grad,
47
- Activation.GeLUApprox: gelu_approx_grad,
48
- Activation.SquaredReLU: squared_relu_grad,
49
- }[activation]
50
- if activation
51
- else None
52
- )
53
-
54
-
55
- @triton.jit
56
- def tanh(x):
57
- # Tanh is just a scaled sigmoid
58
- return 2 * tl.sigmoid(2 * x) - 1
59
-
60
-
61
- @triton.jit
62
- def cosh(x):
63
- exp_x = tl.exp(x)
64
- return (exp_x + 1.0 / exp_x) * 0.5
65
-
66
-
67
- # a Triton implementation of the most used activations
68
- # See for instance http://arxiv.org/abs/1606.08415 for an overview
69
-
70
- # ReLU
71
- @triton.jit
72
- def relu(x):
73
- """
74
- ReLU_ activation function
75
-
76
- .. _ReLU: https://pytorch.org/docs/stable/generated/torch.nn.ReLU.html
77
- """
78
- zero = 0.0
79
- return tl.where(x >= 0, x, zero.to(x.dtype))
80
-
81
-
82
- @triton.jit
83
- def relu_grad(x):
84
- # ReLU is different from other activations
85
- # in that it does not require the input to retrospectively compute its gradient
86
- # here the input is the downstream gradient, and we return the upstream gradient directly
87
- zero = 0.0
88
- one = 1.0
89
- return tl.where(x >= 0, one.to(x.dtype), zero.to(x.dtype))
90
-
91
-
92
- @triton.jit
93
- def squared_relu(x):
94
- """
95
- Squared ReLU activation, as proposed in the Primer_ paper.
96
-
97
- .. _Primer: https://arxiv.org/abs/2109.08668
98
- """
99
- x_ = relu(x)
100
- return (x_ * x_).to(x.dtype)
101
-
102
-
103
- @triton.jit
104
- def squared_relu_grad(x):
105
- return tl.where(x >= 0, 2.0 * x, 0.0)
106
-
107
-
108
- # Leaky ReLU
109
- @triton.jit
110
- def leaky_relu(x):
111
- """
112
- LeakyReLU_ activation
113
-
114
- .. _LeakyReLU: https://pytorch.org/docs/stable/generated/torch.nn.LeakyReLU.html
115
- """
116
- scale = 0.01 + 0.0
117
- scale = scale.to(x.dtype)
118
- return tl.where(x >= 0, x, scale * x)
119
-
120
-
121
- @triton.jit
122
- def leaky_relu_grad(x):
123
- min_grad = 0.01
124
- max_grad = 1
125
-
126
- min_grad = min_grad.to(x.dtype)
127
- max_grad = max_grad.to(x.dtype)
128
-
129
- return tl.where(x >= 0, max_grad, min_grad)
130
-
131
-
132
- @triton.jit
133
- def gelu(x):
134
- """Gaussian Error Linear Unit (GELU)"""
135
- return x * 0.5 * (1.0 + tl.libdevice.erf(x * _sqrt1_2))
136
-
137
-
138
- @triton.jit
139
- def gelu_grad(x):
140
- cdf = 0.5 * (1.0 + tl.libdevice.erf(x * _sqrt1_2))
141
- pdf = tl.exp(-0.5 * x * x) * _gaussian_pdf_normalization
142
- return cdf + x * pdf
143
-
144
-
145
- @triton.jit
146
- def gelu_approx(x):
147
- """
148
- GeLU_ activation - Gaussian error linear unit, with tanh approximation
149
-
150
- .. _GeLU: https://arxiv.org/pdf/1606.08415.pdf
151
- """
152
- return 0.5 * x * (1.0 + tanh(_sqrt2pi * x * (1.0 + 0.044715 * x * x)))
153
-
154
-
155
- @triton.jit
156
- def gelu_approx_grad(x):
157
- # CREDITS: Fast implementation proposed in
158
- # https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/fused_bias_gelu.py#L30
159
- tanh_out = tanh(0.79788456 * x * (1 + 0.044715 * x * x))
160
- return 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
161
- 1 + tanh_out
162
- )
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/layer_norm.py DELETED
@@ -1,1252 +0,0 @@
1
- # Copyright (c) 2024, Tri Dao.
2
- # Implement dropout + residual + layer_norm / rms_norm.
3
-
4
- # Based on the Triton LayerNorm tutorial: https://triton-lang.org/main/getting-started/tutorials/05-layer-norm.html
5
- # For the backward pass, we keep weight_grad and bias_grad in registers and accumulate.
6
- # This is faster for dimensions up to 8k, but after that it's much slower due to register spilling.
7
- # The models we train have hidden dim up to 8k anyway (e.g. Llama 70B), so this is fine.
8
-
9
- import math
10
- from typing import Optional, List
11
-
12
- import torch
13
- import torch.nn.functional as F
14
- from torch import Tensor
15
-
16
- import triton
17
- import triton.language as tl
18
-
19
- from flash_attn.utils.torch import custom_fwd, custom_bwd
20
- from flash_attn.utils.library import triton_op
21
-
22
-
23
- def maybe_contiguous_lastdim(x):
24
- return x.contiguous() if x is not None and x.stride(-1) != 1 else x
25
-
26
-
27
- def maybe_contiguous(x):
28
- return x.contiguous() if x is not None else None
29
-
30
-
31
- def triton_autotune_configs():
32
- # Return configs with a valid warp count for the current device
33
- configs = []
34
- # Maximum threads per block is architecture-dependent in theory, but in reality all are 1024
35
- max_threads_per_block = 1024
36
- # Default to warp size 32 if not defined by device
37
- warp_size = getattr(torch.cuda.get_device_properties(torch.cuda.current_device()), "warp_size", 32)
38
- # Autotune for warp counts which are powers of 2 and do not exceed thread per block limit
39
- return [triton.Config({}, num_warps=warp_count) for warp_count in [1, 2, 4, 8, 16, 32]
40
- if warp_count * warp_size <= max_threads_per_block]
41
- # return [triton.Config({}, num_warps=8)]
42
-
43
-
44
- def layer_norm_ref(
45
- x,
46
- weight,
47
- bias,
48
- residual=None,
49
- x1=None,
50
- weight1=None,
51
- bias1=None,
52
- eps=1e-6,
53
- dropout_p=0.0,
54
- rowscale=None,
55
- prenorm=False,
56
- zero_centered_weight=False,
57
- dropout_mask=None,
58
- dropout_mask1=None,
59
- upcast=False,
60
- ):
61
- dtype = x.dtype
62
- if upcast:
63
- x = x.float()
64
- weight = weight.float()
65
- bias = bias.float() if bias is not None else None
66
- residual = residual.float() if residual is not None else residual
67
- x1 = x1.float() if x1 is not None else None
68
- weight1 = weight1.float() if weight1 is not None else None
69
- bias1 = bias1.float() if bias1 is not None else None
70
- if zero_centered_weight:
71
- weight = weight + 1.0
72
- if weight1 is not None:
73
- weight1 = weight1 + 1.0
74
- if x1 is not None:
75
- assert rowscale is None, "rowscale is not supported with parallel LayerNorm"
76
- if rowscale is not None:
77
- x = x * rowscale[..., None]
78
- if dropout_p > 0.0:
79
- if dropout_mask is not None:
80
- x = x.masked_fill(~dropout_mask, 0.0) / (1.0 - dropout_p)
81
- else:
82
- x = F.dropout(x, p=dropout_p)
83
- if x1 is not None:
84
- if dropout_mask1 is not None:
85
- x1 = x1.masked_fill(~dropout_mask1, 0.0) / (1.0 - dropout_p)
86
- else:
87
- x1 = F.dropout(x1, p=dropout_p)
88
- if x1 is not None:
89
- x = x + x1
90
- if residual is not None:
91
- x = (x + residual).to(x.dtype)
92
- out = F.layer_norm(x.to(weight.dtype), x.shape[-1:], weight=weight, bias=bias, eps=eps).to(
93
- dtype
94
- )
95
- if weight1 is None:
96
- return out if not prenorm else (out, x)
97
- else:
98
- out1 = F.layer_norm(
99
- x.to(weight1.dtype), x.shape[-1:], weight=weight1, bias=bias1, eps=eps
100
- ).to(dtype)
101
- return (out, out1) if not prenorm else (out, out1, x)
102
-
103
-
104
- def rms_norm_ref(
105
- x,
106
- weight,
107
- bias,
108
- residual=None,
109
- x1=None,
110
- weight1=None,
111
- bias1=None,
112
- eps=1e-6,
113
- dropout_p=0.0,
114
- rowscale=None,
115
- prenorm=False,
116
- zero_centered_weight=False,
117
- dropout_mask=None,
118
- dropout_mask1=None,
119
- upcast=False,
120
- ):
121
- dtype = x.dtype
122
- if upcast:
123
- x = x.float()
124
- weight = weight.float()
125
- bias = bias.float() if bias is not None else None
126
- residual = residual.float() if residual is not None else residual
127
- x1 = x1.float() if x1 is not None else None
128
- weight1 = weight1.float() if weight1 is not None else None
129
- bias1 = bias1.float() if bias1 is not None else None
130
- if zero_centered_weight:
131
- weight = weight + 1.0
132
- if weight1 is not None:
133
- weight1 = weight1 + 1.0
134
- if x1 is not None:
135
- assert rowscale is None, "rowscale is not supported with parallel LayerNorm"
136
- if rowscale is not None:
137
- x = x * rowscale[..., None]
138
- if dropout_p > 0.0:
139
- if dropout_mask is not None:
140
- x = x.masked_fill(~dropout_mask, 0.0) / (1.0 - dropout_p)
141
- else:
142
- x = F.dropout(x, p=dropout_p)
143
- if x1 is not None:
144
- if dropout_mask1 is not None:
145
- x1 = x1.masked_fill(~dropout_mask1, 0.0) / (1.0 - dropout_p)
146
- else:
147
- x1 = F.dropout(x1, p=dropout_p)
148
- if x1 is not None:
149
- x = x + x1
150
- if residual is not None:
151
- x = (x + residual).to(x.dtype)
152
- rstd = 1 / torch.sqrt((x.square()).mean(dim=-1, keepdim=True) + eps)
153
- out = ((x * rstd * weight) + bias if bias is not None else (x * rstd * weight)).to(dtype)
154
- if weight1 is None:
155
- return out if not prenorm else (out, x)
156
- else:
157
- out1 = ((x * rstd * weight1) + bias1 if bias1 is not None else (x * rstd * weight1)).to(
158
- dtype
159
- )
160
- return (out, out1) if not prenorm else (out, out1, x)
161
-
162
-
163
- @triton.autotune(
164
- configs=triton_autotune_configs(),
165
- key=["N", "HAS_RESIDUAL", "STORE_RESIDUAL_OUT", "IS_RMS_NORM", "HAS_BIAS", "HAS_X1", "HAS_W1", "HAS_B1"],
166
- )
167
- # torch compile doesn't like triton.heuristics, so we set these manually when calling the kernel
168
- # @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
169
- # @triton.heuristics({"HAS_RESIDUAL": lambda args: args["RESIDUAL"] is not None})
170
- # @triton.heuristics({"HAS_X1": lambda args: args["X1"] is not None})
171
- # @triton.heuristics({"HAS_W1": lambda args: args["W1"] is not None})
172
- # @triton.heuristics({"HAS_B1": lambda args: args["B1"] is not None})
173
- @triton.jit
174
- def _layer_norm_fwd_1pass_kernel(
175
- X, # pointer to the input
176
- Y, # pointer to the output
177
- W, # pointer to the weights
178
- B, # pointer to the biases
179
- RESIDUAL, # pointer to the residual
180
- X1,
181
- W1,
182
- B1,
183
- Y1,
184
- RESIDUAL_OUT, # pointer to the residual
185
- ROWSCALE,
186
- SEEDS, # Dropout seeds for each row
187
- DROPOUT_MASK,
188
- DROPOUT_MASK1,
189
- Mean, # pointer to the mean
190
- Rstd, # pointer to the 1/std
191
- stride_x_row, # how much to increase the pointer when moving by 1 row
192
- stride_y_row,
193
- stride_res_row,
194
- stride_res_out_row,
195
- stride_x1_row,
196
- stride_y1_row,
197
- M, # number of rows in X
198
- N, # number of columns in X
199
- eps, # epsilon to avoid division by zero
200
- dropout_p, # Dropout probability
201
- zero_centered_weight, # If true, add 1.0 to the weight
202
- IS_RMS_NORM: tl.constexpr,
203
- BLOCK_N: tl.constexpr,
204
- HAS_RESIDUAL: tl.constexpr,
205
- STORE_RESIDUAL_OUT: tl.constexpr,
206
- HAS_BIAS: tl.constexpr,
207
- HAS_DROPOUT: tl.constexpr,
208
- STORE_DROPOUT_MASK: tl.constexpr,
209
- HAS_ROWSCALE: tl.constexpr,
210
- HAS_X1: tl.constexpr,
211
- HAS_W1: tl.constexpr,
212
- HAS_B1: tl.constexpr,
213
- ):
214
- # Map the program id to the row of X and Y it should compute.
215
- row = tl.program_id(0)
216
- X += row * stride_x_row
217
- Y += row * stride_y_row
218
- if HAS_RESIDUAL:
219
- RESIDUAL += row * stride_res_row
220
- if STORE_RESIDUAL_OUT:
221
- RESIDUAL_OUT += row * stride_res_out_row
222
- if HAS_X1:
223
- X1 += row * stride_x1_row
224
- if HAS_W1:
225
- Y1 += row * stride_y1_row
226
- # Compute mean and variance
227
- cols = tl.arange(0, BLOCK_N)
228
- x = tl.load(X + cols, mask=cols < N, other=0.0).to(tl.float32)
229
- if HAS_ROWSCALE:
230
- rowscale = tl.load(ROWSCALE + row).to(tl.float32)
231
- x *= rowscale
232
- if HAS_DROPOUT:
233
- # Compute dropout mask
234
- # 7 rounds is good enough, and reduces register pressure
235
- keep_mask = tl.rand(tl.load(SEEDS + row).to(tl.uint32), cols, n_rounds=7) > dropout_p
236
- x = tl.where(keep_mask, x / (1.0 - dropout_p), 0.0)
237
- if STORE_DROPOUT_MASK:
238
- tl.store(DROPOUT_MASK + row * N + cols, keep_mask, mask=cols < N)
239
- if HAS_X1:
240
- x1 = tl.load(X1 + cols, mask=cols < N, other=0.0).to(tl.float32)
241
- if HAS_ROWSCALE:
242
- rowscale = tl.load(ROWSCALE + M + row).to(tl.float32)
243
- x1 *= rowscale
244
- if HAS_DROPOUT:
245
- # Compute dropout mask
246
- # 7 rounds is good enough, and reduces register pressure
247
- keep_mask = (
248
- tl.rand(tl.load(SEEDS + M + row).to(tl.uint32), cols, n_rounds=7) > dropout_p
249
- )
250
- x1 = tl.where(keep_mask, x1 / (1.0 - dropout_p), 0.0)
251
- if STORE_DROPOUT_MASK:
252
- tl.store(DROPOUT_MASK1 + row * N + cols, keep_mask, mask=cols < N)
253
- x += x1
254
- if HAS_RESIDUAL:
255
- residual = tl.load(RESIDUAL + cols, mask=cols < N, other=0.0).to(tl.float32)
256
- x += residual
257
- if STORE_RESIDUAL_OUT:
258
- tl.store(RESIDUAL_OUT + cols, x, mask=cols < N)
259
- if not IS_RMS_NORM:
260
- mean = tl.sum(x, axis=0) / N
261
- tl.store(Mean + row, mean)
262
- xbar = tl.where(cols < N, x - mean, 0.0)
263
- var = tl.sum(xbar * xbar, axis=0) / N
264
- else:
265
- xbar = tl.where(cols < N, x, 0.0)
266
- var = tl.sum(xbar * xbar, axis=0) / N
267
- rstd = 1 / tl.sqrt(var + eps)
268
- tl.store(Rstd + row, rstd)
269
- # Normalize and apply linear transformation
270
- mask = cols < N
271
- w = tl.load(W + cols, mask=mask).to(tl.float32)
272
- if zero_centered_weight:
273
- w += 1.0
274
- if HAS_BIAS:
275
- b = tl.load(B + cols, mask=mask).to(tl.float32)
276
- x_hat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
277
- y = x_hat * w + b if HAS_BIAS else x_hat * w
278
- # Write output
279
- tl.store(Y + cols, y, mask=mask)
280
- if HAS_W1:
281
- w1 = tl.load(W1 + cols, mask=mask).to(tl.float32)
282
- if zero_centered_weight:
283
- w1 += 1.0
284
- if HAS_B1:
285
- b1 = tl.load(B1 + cols, mask=mask).to(tl.float32)
286
- y1 = x_hat * w1 + b1 if HAS_B1 else x_hat * w1
287
- tl.store(Y1 + cols, y1, mask=mask)
288
-
289
-
290
- def _layer_norm_fwd(
291
- x: Tensor,
292
- weight: Tensor,
293
- bias: Tensor,
294
- eps: float,
295
- residual: Optional[Tensor] = None,
296
- x1: Optional[Tensor] = None,
297
- weight1: Optional[Tensor] = None,
298
- bias1: Optional[Tensor] = None,
299
- dropout_p: float = 0.0,
300
- rowscale: Optional[Tensor] = None,
301
- out_dtype: Optional[torch.dtype] = None,
302
- residual_dtype: Optional[torch.dtype] = None,
303
- zero_centered_weight: bool = False,
304
- is_rms_norm: bool = False,
305
- return_dropout_mask: bool = False,
306
- out: Optional[Tensor] = None,
307
- residual_out: Optional[Tensor] = None
308
- ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor):
309
- # Need to wrap to handle the case where residual_out is a alias of x, which makes torch.library
310
- # and torch.compile unhappy. Also allocate memory for out and residual_out if they are None
311
- # so that _layer_norm_fwd_impl doesn't have to return them.
312
- if out is None:
313
- out = torch.empty_like(x, dtype=x.dtype if out_dtype is None else out_dtype)
314
- if residual is not None:
315
- residual_dtype = residual.dtype
316
- if residual_out is None and (
317
- residual is not None
318
- or (residual_dtype is not None and residual_dtype != x.dtype)
319
- or dropout_p > 0.0
320
- or rowscale is not None
321
- or x1 is not None
322
- ):
323
- residual_out = torch.empty_like(
324
- x, dtype=residual_dtype if residual_dtype is not None else x.dtype
325
- )
326
- else:
327
- residual_out = None
328
- y1, mean, rstd, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd_impl(
329
- x,
330
- weight,
331
- bias,
332
- eps,
333
- out,
334
- residual=residual,
335
- x1=x1,
336
- weight1=weight1,
337
- bias1=bias1,
338
- dropout_p=dropout_p,
339
- rowscale=rowscale,
340
- zero_centered_weight=zero_centered_weight,
341
- is_rms_norm=is_rms_norm,
342
- return_dropout_mask=return_dropout_mask,
343
- residual_out=residual_out,
344
- )
345
- # residual_out is None if residual is None and residual_dtype == input_dtype and dropout_p == 0.0
346
- if residual_out is None:
347
- residual_out = x
348
- return out, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1
349
-
350
-
351
- # [2025-04-28] torch.library.triton_op ignores the schema argument, but here we need the schema
352
- # since we're returning a tuple of tensors
353
- @triton_op("flash_attn::layer_norm_fwd_impl", mutates_args={"out", "residual_out"},
354
- schema="(Tensor x, Tensor weight, Tensor bias, float eps, Tensor(a!) out, Tensor? residual, Tensor? x1, Tensor? weight1, Tensor? bias1, float dropout_p, Tensor? rowscale, bool zero_centered_weight, bool is_rms_norm, bool return_dropout_mask, Tensor(a!)? residual_out) -> (Tensor y1, Tensor mean, Tensor rstd, Tensor seeds, Tensor dropout_mask, Tensor dropout_mask1)")
355
- def _layer_norm_fwd_impl(
356
- x: Tensor,
357
- weight: Tensor,
358
- bias: Tensor,
359
- eps: float,
360
- out: Tensor,
361
- residual: Optional[Tensor] = None,
362
- x1: Optional[Tensor] = None,
363
- weight1: Optional[Tensor] = None,
364
- bias1: Optional[Tensor] = None,
365
- dropout_p: float = 0.0,
366
- rowscale: Optional[Tensor] = None,
367
- zero_centered_weight: bool = False,
368
- is_rms_norm: bool = False,
369
- return_dropout_mask: bool = False,
370
- residual_out: Optional[Tensor] = None
371
- ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor):
372
- M, N = x.shape
373
- assert x.stride(-1) == 1
374
- if residual is not None:
375
- assert residual.stride(-1) == 1
376
- assert residual.shape == (M, N)
377
- assert weight.shape == (N,)
378
- assert weight.stride(-1) == 1
379
- if bias is not None:
380
- assert bias.stride(-1) == 1
381
- assert bias.shape == (N,)
382
- if x1 is not None:
383
- assert x1.shape == x.shape
384
- assert rowscale is None
385
- assert x1.stride(-1) == 1
386
- if weight1 is not None:
387
- assert weight1.shape == (N,)
388
- assert weight1.stride(-1) == 1
389
- if bias1 is not None:
390
- assert bias1.shape == (N,)
391
- assert bias1.stride(-1) == 1
392
- if rowscale is not None:
393
- assert rowscale.is_contiguous()
394
- assert rowscale.shape == (M,)
395
- assert out.shape == x.shape
396
- assert out.stride(-1) == 1
397
- if residual_out is not None:
398
- assert residual_out.shape == x.shape
399
- assert residual_out.stride(-1) == 1
400
- if weight1 is not None:
401
- y1 = torch.empty_like(out)
402
- assert y1.stride(-1) == 1
403
- else:
404
- y1 = None
405
- mean = torch.empty((M,), dtype=torch.float32, device=x.device) if not is_rms_norm else None
406
- rstd = torch.empty((M,), dtype=torch.float32, device=x.device)
407
- if dropout_p > 0.0:
408
- seeds = torch.randint(
409
- 2**32, (M if x1 is None else 2 * M,), device=x.device, dtype=torch.int64
410
- )
411
- else:
412
- seeds = None
413
- if return_dropout_mask and dropout_p > 0.0:
414
- dropout_mask = torch.empty(M, N, device=x.device, dtype=torch.bool)
415
- if x1 is not None:
416
- dropout_mask1 = torch.empty(M, N, device=x.device, dtype=torch.bool)
417
- else:
418
- dropout_mask1 = None
419
- else:
420
- dropout_mask, dropout_mask1 = None, None
421
- # Less than 64KB per feature: enqueue fused kernel
422
- MAX_FUSED_SIZE = 65536 // x.element_size()
423
- BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
424
- if N > BLOCK_N:
425
- raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
426
- with torch.cuda.device(x.device.index):
427
- torch.library.wrap_triton(_layer_norm_fwd_1pass_kernel)[(M,)](
428
- x,
429
- out,
430
- weight,
431
- bias,
432
- residual,
433
- x1,
434
- weight1,
435
- bias1,
436
- y1,
437
- residual_out,
438
- rowscale,
439
- seeds,
440
- dropout_mask,
441
- dropout_mask1,
442
- mean,
443
- rstd,
444
- x.stride(0),
445
- out.stride(0),
446
- residual.stride(0) if residual is not None else 0,
447
- residual_out.stride(0) if residual_out is not None else 0,
448
- x1.stride(0) if x1 is not None else 0,
449
- y1.stride(0) if y1 is not None else 0,
450
- M,
451
- N,
452
- eps,
453
- dropout_p,
454
- # Passing bool make torch inductor very unhappy since it then tries to compare to int_max
455
- int(zero_centered_weight),
456
- is_rms_norm,
457
- BLOCK_N,
458
- residual is not None,
459
- residual_out is not None,
460
- bias is not None,
461
- dropout_p > 0.0,
462
- dropout_mask is not None,
463
- rowscale is not None,
464
- HAS_X1=x1 is not None,
465
- HAS_W1=weight1 is not None,
466
- HAS_B1=bias1 is not None,
467
- )
468
- return y1, mean, rstd, seeds, dropout_mask, dropout_mask1
469
-
470
-
471
- @triton.autotune(
472
- configs=triton_autotune_configs(),
473
- key=["N", "HAS_DRESIDUAL", "STORE_DRESIDUAL", "IS_RMS_NORM", "HAS_BIAS", "HAS_DROPOUT"],
474
- )
475
- # torch compile doesn't like triton.heuristics, so we set these manually when calling the kernel
476
- # @triton.heuristics({"HAS_BIAS": lambda args: args["B"] is not None})
477
- # @triton.heuristics({"HAS_DRESIDUAL": lambda args: args["DRESIDUAL"] is not None})
478
- # @triton.heuristics({"STORE_DRESIDUAL": lambda args: args["DRESIDUAL_IN"] is not None})
479
- # @triton.heuristics({"HAS_ROWSCALE": lambda args: args["ROWSCALE"] is not None})
480
- # @triton.heuristics({"HAS_DY1": lambda args: args["DY1"] is not None})
481
- # @triton.heuristics({"HAS_DX1": lambda args: args["DX1"] is not None})
482
- # @triton.heuristics({"HAS_B1": lambda args: args["DB1"] is not None})
483
- # @triton.heuristics({"RECOMPUTE_OUTPUT": lambda args: args["Y"] is not None})
484
- @triton.jit
485
- def _layer_norm_bwd_kernel(
486
- X, # pointer to the input
487
- W, # pointer to the weights
488
- B, # pointer to the biases
489
- Y, # pointer to the output to be recomputed
490
- DY, # pointer to the output gradient
491
- DX, # pointer to the input gradient
492
- DW, # pointer to the partial sum of weights gradient
493
- DB, # pointer to the partial sum of biases gradient
494
- DRESIDUAL,
495
- W1,
496
- DY1,
497
- DX1,
498
- DW1,
499
- DB1,
500
- DRESIDUAL_IN,
501
- ROWSCALE,
502
- SEEDS,
503
- Mean, # pointer to the mean
504
- Rstd, # pointer to the 1/std
505
- stride_x_row, # how much to increase the pointer when moving by 1 row
506
- stride_y_row,
507
- stride_dy_row,
508
- stride_dx_row,
509
- stride_dres_row,
510
- stride_dy1_row,
511
- stride_dx1_row,
512
- stride_dres_in_row,
513
- M, # number of rows in X
514
- N, # number of columns in X
515
- eps, # epsilon to avoid division by zero
516
- dropout_p,
517
- zero_centered_weight,
518
- rows_per_program,
519
- IS_RMS_NORM: tl.constexpr,
520
- BLOCK_N: tl.constexpr,
521
- HAS_DRESIDUAL: tl.constexpr,
522
- STORE_DRESIDUAL: tl.constexpr,
523
- HAS_BIAS: tl.constexpr,
524
- HAS_DROPOUT: tl.constexpr,
525
- HAS_ROWSCALE: tl.constexpr,
526
- HAS_DY1: tl.constexpr,
527
- HAS_DX1: tl.constexpr,
528
- HAS_B1: tl.constexpr,
529
- RECOMPUTE_OUTPUT: tl.constexpr,
530
- ):
531
- # Map the program id to the elements of X, DX, and DY it should compute.
532
- row_block_id = tl.program_id(0)
533
- row_start = row_block_id * rows_per_program
534
- # Do not early exit if row_start >= M, because we need to write DW and DB
535
- cols = tl.arange(0, BLOCK_N)
536
- mask = cols < N
537
- X += row_start * stride_x_row
538
- if HAS_DRESIDUAL:
539
- DRESIDUAL += row_start * stride_dres_row
540
- if STORE_DRESIDUAL:
541
- DRESIDUAL_IN += row_start * stride_dres_in_row
542
- DY += row_start * stride_dy_row
543
- DX += row_start * stride_dx_row
544
- if HAS_DY1:
545
- DY1 += row_start * stride_dy1_row
546
- if HAS_DX1:
547
- DX1 += row_start * stride_dx1_row
548
- if RECOMPUTE_OUTPUT:
549
- Y += row_start * stride_y_row
550
- w = tl.load(W + cols, mask=mask).to(tl.float32)
551
- if zero_centered_weight:
552
- w += 1.0
553
- if RECOMPUTE_OUTPUT and HAS_BIAS:
554
- b = tl.load(B + cols, mask=mask, other=0.0).to(tl.float32)
555
- if HAS_DY1:
556
- w1 = tl.load(W1 + cols, mask=mask).to(tl.float32)
557
- if zero_centered_weight:
558
- w1 += 1.0
559
- dw = tl.zeros((BLOCK_N,), dtype=tl.float32)
560
- if HAS_BIAS:
561
- db = tl.zeros((BLOCK_N,), dtype=tl.float32)
562
- if HAS_DY1:
563
- dw1 = tl.zeros((BLOCK_N,), dtype=tl.float32)
564
- if HAS_B1:
565
- db1 = tl.zeros((BLOCK_N,), dtype=tl.float32)
566
- row_end = min((row_block_id + 1) * rows_per_program, M)
567
- for row in range(row_start, row_end):
568
- # Load data to SRAM
569
- x = tl.load(X + cols, mask=mask, other=0).to(tl.float32)
570
- dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32)
571
- if HAS_DY1:
572
- dy1 = tl.load(DY1 + cols, mask=mask, other=0).to(tl.float32)
573
- if not IS_RMS_NORM:
574
- mean = tl.load(Mean + row)
575
- rstd = tl.load(Rstd + row)
576
- # Compute dx
577
- xhat = (x - mean) * rstd if not IS_RMS_NORM else x * rstd
578
- xhat = tl.where(mask, xhat, 0.0)
579
- if RECOMPUTE_OUTPUT:
580
- y = xhat * w + b if HAS_BIAS else xhat * w
581
- tl.store(Y + cols, y, mask=mask)
582
- wdy = w * dy
583
- dw += dy * xhat
584
- if HAS_BIAS:
585
- db += dy
586
- if HAS_DY1:
587
- wdy += w1 * dy1
588
- dw1 += dy1 * xhat
589
- if HAS_B1:
590
- db1 += dy1
591
- if not IS_RMS_NORM:
592
- c1 = tl.sum(xhat * wdy, axis=0) / N
593
- c2 = tl.sum(wdy, axis=0) / N
594
- dx = (wdy - (xhat * c1 + c2)) * rstd
595
- else:
596
- c1 = tl.sum(xhat * wdy, axis=0) / N
597
- dx = (wdy - xhat * c1) * rstd
598
- if HAS_DRESIDUAL:
599
- dres = tl.load(DRESIDUAL + cols, mask=mask, other=0).to(tl.float32)
600
- dx += dres
601
- # Write dx
602
- if STORE_DRESIDUAL:
603
- tl.store(DRESIDUAL_IN + cols, dx, mask=mask)
604
- if HAS_DX1:
605
- if HAS_DROPOUT:
606
- keep_mask = (
607
- tl.rand(tl.load(SEEDS + M + row).to(tl.uint32), cols, n_rounds=7) > dropout_p
608
- )
609
- dx1 = tl.where(keep_mask, dx / (1.0 - dropout_p), 0.0)
610
- else:
611
- dx1 = dx
612
- tl.store(DX1 + cols, dx1, mask=mask)
613
- if HAS_DROPOUT:
614
- keep_mask = tl.rand(tl.load(SEEDS + row).to(tl.uint32), cols, n_rounds=7) > dropout_p
615
- dx = tl.where(keep_mask, dx / (1.0 - dropout_p), 0.0)
616
- if HAS_ROWSCALE:
617
- rowscale = tl.load(ROWSCALE + row).to(tl.float32)
618
- dx *= rowscale
619
- tl.store(DX + cols, dx, mask=mask)
620
-
621
- X += stride_x_row
622
- if HAS_DRESIDUAL:
623
- DRESIDUAL += stride_dres_row
624
- if STORE_DRESIDUAL:
625
- DRESIDUAL_IN += stride_dres_in_row
626
- if RECOMPUTE_OUTPUT:
627
- Y += stride_y_row
628
- DY += stride_dy_row
629
- DX += stride_dx_row
630
- if HAS_DY1:
631
- DY1 += stride_dy1_row
632
- if HAS_DX1:
633
- DX1 += stride_dx1_row
634
- tl.store(DW + row_block_id * N + cols, dw, mask=mask)
635
- if HAS_BIAS:
636
- tl.store(DB + row_block_id * N + cols, db, mask=mask)
637
- if HAS_DY1:
638
- tl.store(DW1 + row_block_id * N + cols, dw1, mask=mask)
639
- if HAS_B1:
640
- tl.store(DB1 + row_block_id * N + cols, db1, mask=mask)
641
-
642
-
643
- def _layer_norm_bwd(
644
- dy: Tensor,
645
- x: Tensor,
646
- weight: Tensor,
647
- bias: Tensor,
648
- eps: float,
649
- mean: Tensor,
650
- rstd: Tensor,
651
- dresidual: Optional[Tensor] = None,
652
- dy1: Optional[Tensor] = None,
653
- weight1: Optional[Tensor] = None,
654
- bias1: Optional[Tensor] = None,
655
- seeds: Optional[Tensor] = None,
656
- dropout_p: float = 0.0,
657
- rowscale: Optional[Tensor] = None,
658
- has_residual: bool = False,
659
- has_x1: bool = False,
660
- zero_centered_weight: bool = False,
661
- is_rms_norm: bool = False,
662
- x_dtype: Optional[torch.dtype] = None,
663
- recompute_output: bool = False,
664
- ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor):
665
- # Need to wrap to handle the case where dresidual_in or dx1 are aliases of x,
666
- # which makes torch.library unhappy
667
- dx, dw, db, dresidual_in, dx1, dw1, db1, y = _layer_norm_bwd_impl(
668
- dy,
669
- x,
670
- weight,
671
- bias,
672
- eps,
673
- mean,
674
- rstd,
675
- dresidual,
676
- dy1,
677
- weight1,
678
- bias1,
679
- seeds,
680
- dropout_p,
681
- rowscale,
682
- has_residual,
683
- has_x1,
684
- zero_centered_weight,
685
- is_rms_norm,
686
- x_dtype=x_dtype,
687
- recompute_output=recompute_output,
688
- )
689
- # Don't need to compute dresidual_in separately in this case
690
- if has_residual and dx.dtype == x.dtype and dropout_p == 0.0 and rowscale is None:
691
- dresidual_in = dx
692
- if has_x1 and dropout_p == 0.0:
693
- dx1 = dx
694
- return dx, dw, db, dresidual_in, dx1, dw1, db1, y
695
-
696
-
697
-
698
- @triton_op("flash_attn::layer_norm_bwd_impl", mutates_args={},
699
- schema="(Tensor dy, Tensor x, Tensor weight, Tensor bias, float eps, Tensor mean, Tensor rstd, Tensor? dresidual, Tensor? dy1, Tensor? weight1, Tensor? bias1, Tensor? seeds, float dropout_p, Tensor? rowscale, bool has_residual, bool has_x1, bool zero_centered_weight, bool is_rms_norm, ScalarType? x_dtype, bool recompute_output) -> (Tensor dx, Tensor dw, Tensor db, Tensor dresidual_in, Tensor dx1, Tensor dw1, Tensor db1, Tensor y)",
700
- allow_decomposition=False, # Don't let torch.compile trace inside
701
- )
702
- def _layer_norm_bwd_impl(
703
- dy: Tensor,
704
- x: Tensor,
705
- weight: Tensor,
706
- bias: Tensor,
707
- eps: float,
708
- mean: Tensor,
709
- rstd: Tensor,
710
- dresidual: Optional[Tensor] = None,
711
- dy1: Optional[Tensor] = None,
712
- weight1: Optional[Tensor] = None,
713
- bias1: Optional[Tensor] = None,
714
- seeds: Optional[Tensor] = None,
715
- dropout_p: float = 0.0,
716
- rowscale: Optional[Tensor] = None,
717
- has_residual: bool = False,
718
- has_x1: bool = False,
719
- zero_centered_weight: bool = False,
720
- is_rms_norm: bool = False,
721
- x_dtype: Optional[torch.dtype] = None,
722
- recompute_output: bool = False,
723
- ) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor, Tensor):
724
- M, N = x.shape
725
- assert x.stride(-1) == 1
726
- dy = maybe_contiguous_lastdim(dy)
727
- assert dy.stride(-1) == 1
728
- assert dy.shape == (M, N)
729
- if dresidual is not None:
730
- dresidual = maybe_contiguous_lastdim(dresidual)
731
- assert dresidual.stride(-1) == 1
732
- assert dresidual.shape == (M, N)
733
- assert weight.shape == (N,)
734
- assert weight.stride(-1) == 1
735
- if bias is not None:
736
- assert bias.stride(-1) == 1
737
- assert bias.shape == (N,)
738
- if dy1 is not None:
739
- dy1 = maybe_contiguous_lastdim(dy1)
740
- assert weight1 is not None
741
- assert dy1.shape == dy.shape
742
- assert dy1.stride(-1) == 1
743
- if weight1 is not None:
744
- assert weight1.shape == (N,)
745
- assert weight1.stride(-1) == 1
746
- if bias1 is not None:
747
- assert bias1.shape == (N,)
748
- assert bias1.stride(-1) == 1
749
- if seeds is not None:
750
- assert seeds.is_contiguous()
751
- assert seeds.shape == (M if not has_x1 else M * 2,)
752
- if rowscale is not None:
753
- assert rowscale.is_contiguous()
754
- assert rowscale.shape == (M,)
755
- # allocate output
756
- dx = (
757
- torch.empty_like(x)
758
- if x_dtype is None
759
- else torch.empty(M, N, dtype=x_dtype, device=x.device)
760
- )
761
- dresidual_in = (
762
- torch.empty_like(x)
763
- if has_residual
764
- and (dx.dtype != x.dtype or dropout_p > 0.0 or rowscale is not None or has_x1)
765
- else None
766
- )
767
- dx1 = torch.empty_like(dx) if (has_x1 and dropout_p > 0.0) else None
768
- y = torch.empty(M, N, dtype=dy.dtype, device=dy.device) if recompute_output else None
769
- if recompute_output:
770
- assert weight1 is None, "recompute_output is not supported with parallel LayerNorm"
771
-
772
- # Less than 64KB per feature: enqueue fused kernel
773
- MAX_FUSED_SIZE = 65536 // x.element_size()
774
- BLOCK_N = min(MAX_FUSED_SIZE, triton.next_power_of_2(N))
775
- if N > BLOCK_N:
776
- raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.")
777
- # Increasing the multiple (e.g. 8) will allow more thread blocks to be launched and hide the
778
- # latency of the gmem reads/writes, but will increase the time of summing up dw / db.
779
- sm_count = torch.cuda.get_device_properties(x.device).multi_processor_count * 8
780
- _dw = torch.empty((sm_count, N), dtype=torch.float32, device=weight.device)
781
- _db = (
782
- torch.empty((sm_count, N), dtype=torch.float32, device=bias.device)
783
- if bias is not None
784
- else None
785
- )
786
- _dw1 = torch.empty_like(_dw) if weight1 is not None else None
787
- _db1 = torch.empty_like(_db) if bias1 is not None else None
788
- rows_per_program = math.ceil(M / sm_count)
789
- grid = (sm_count,)
790
- with torch.cuda.device(x.device.index):
791
- torch.library.wrap_triton(_layer_norm_bwd_kernel)[grid](
792
- x,
793
- weight,
794
- bias,
795
- y,
796
- dy,
797
- dx,
798
- _dw,
799
- _db,
800
- dresidual,
801
- weight1,
802
- dy1,
803
- dx1,
804
- _dw1,
805
- _db1,
806
- dresidual_in,
807
- rowscale,
808
- seeds,
809
- mean,
810
- rstd,
811
- x.stride(0),
812
- 0 if not recompute_output else y.stride(0),
813
- dy.stride(0),
814
- dx.stride(0),
815
- dresidual.stride(0) if dresidual is not None else 0,
816
- dy1.stride(0) if dy1 is not None else 0,
817
- dx1.stride(0) if dx1 is not None else 0,
818
- dresidual_in.stride(0) if dresidual_in is not None else 0,
819
- M,
820
- N,
821
- eps,
822
- dropout_p,
823
- # Passing bool make torch inductor very unhappy since it then tries to compare to int_max
824
- int(zero_centered_weight),
825
- rows_per_program,
826
- is_rms_norm,
827
- BLOCK_N,
828
- dresidual is not None,
829
- dresidual_in is not None,
830
- bias is not None,
831
- dropout_p > 0.0,
832
- HAS_ROWSCALE=rowscale is not None,
833
- HAS_DY1=dy1 is not None,
834
- HAS_DX1=dx1 is not None,
835
- HAS_B1=bias1 is not None,
836
- RECOMPUTE_OUTPUT=y is not None,
837
- )
838
- dw = _dw.sum(0).to(weight.dtype)
839
- db = _db.sum(0).to(bias.dtype) if bias is not None else None
840
- dw1 = _dw1.sum(0).to(weight1.dtype) if weight1 is not None else None
841
- db1 = _db1.sum(0).to(bias1.dtype) if bias1 is not None else None
842
- # dresidual_in and dx1 could be None, the wrapper will handle assigning them from dx
843
- return dx, dw, db, dresidual_in, dx1, dw1, db1, y
844
-
845
-
846
- class LayerNormFn(torch.autograd.Function):
847
-
848
- @staticmethod
849
- def forward(
850
- ctx,
851
- x,
852
- weight,
853
- bias,
854
- residual=None,
855
- x1=None,
856
- weight1=None,
857
- bias1=None,
858
- eps=1e-6,
859
- dropout_p=0.0,
860
- rowscale=None,
861
- prenorm=False,
862
- residual_in_fp32=False,
863
- zero_centered_weight=False,
864
- is_rms_norm=False,
865
- return_dropout_mask=False,
866
- out_dtype=None,
867
- out=None,
868
- residual_out=None
869
- ):
870
- x_shape_og = x.shape
871
- # reshape input data into 2D tensor
872
- x = maybe_contiguous_lastdim(x.reshape(-1, x.shape[-1]))
873
- if residual is not None:
874
- assert residual.shape == x_shape_og
875
- residual = maybe_contiguous_lastdim(residual.reshape(-1, residual.shape[-1]))
876
- if x1 is not None:
877
- assert x1.shape == x_shape_og
878
- assert rowscale is None, "rowscale is not supported with parallel LayerNorm"
879
- x1 = maybe_contiguous_lastdim(x1.reshape(-1, x1.shape[-1]))
880
- weight = weight.contiguous()
881
- bias = maybe_contiguous(bias)
882
- weight1 = maybe_contiguous(weight1)
883
- bias1 = maybe_contiguous(bias1)
884
- if rowscale is not None:
885
- rowscale = rowscale.reshape(-1).contiguous()
886
- residual_dtype = (
887
- residual.dtype
888
- if residual is not None
889
- else (torch.float32 if residual_in_fp32 else None)
890
- )
891
- if out is not None:
892
- out = out.reshape(-1, out.shape[-1])
893
- if residual_out is not None:
894
- residual_out = residual_out.reshape(-1, residual_out.shape[-1])
895
- y, y1, mean, rstd, residual_out, seeds, dropout_mask, dropout_mask1 = _layer_norm_fwd(
896
- x,
897
- weight,
898
- bias,
899
- eps,
900
- residual,
901
- x1,
902
- weight1,
903
- bias1,
904
- dropout_p=dropout_p,
905
- rowscale=rowscale,
906
- out_dtype=out_dtype,
907
- residual_dtype=residual_dtype,
908
- zero_centered_weight=zero_centered_weight,
909
- is_rms_norm=is_rms_norm,
910
- return_dropout_mask=return_dropout_mask,
911
- out=out,
912
- residual_out=residual_out,
913
- )
914
- ctx.save_for_backward(
915
- residual_out, weight, bias, weight1, bias1, rowscale, seeds, mean, rstd
916
- )
917
- ctx.x_shape_og = x_shape_og
918
- ctx.eps = eps
919
- ctx.dropout_p = dropout_p
920
- ctx.is_rms_norm = is_rms_norm
921
- ctx.has_residual = residual is not None
922
- ctx.has_x1 = x1 is not None
923
- ctx.prenorm = prenorm
924
- ctx.x_dtype = x.dtype
925
- ctx.zero_centered_weight = zero_centered_weight
926
- y = y.reshape(x_shape_og)
927
- y1 = y1.reshape(x_shape_og) if y1 is not None else None
928
- residual_out = residual_out.reshape(x_shape_og) if residual_out is not None else None
929
- dropout_mask = dropout_mask.reshape(x_shape_og) if dropout_mask is not None else None
930
- dropout_mask1 = dropout_mask1.reshape(x_shape_og) if dropout_mask1 is not None else None
931
- if not return_dropout_mask:
932
- if weight1 is None:
933
- return y if not prenorm else (y, residual_out)
934
- else:
935
- return (y, y1) if not prenorm else (y, y1, residual_out)
936
- else:
937
- if weight1 is None:
938
- return (
939
- (y, dropout_mask, dropout_mask1)
940
- if not prenorm
941
- else (y, residual_out, dropout_mask, dropout_mask1)
942
- )
943
- else:
944
- return (
945
- (y, y1, dropout_mask, dropout_mask1)
946
- if not prenorm
947
- else (y, y1, residual_out, dropout_mask, dropout_mask1)
948
- )
949
-
950
- @staticmethod
951
- def backward(ctx, dy, *args):
952
- x, weight, bias, weight1, bias1, rowscale, seeds, mean, rstd = ctx.saved_tensors
953
- dy = dy.reshape(-1, dy.shape[-1])
954
- if weight1 is not None:
955
- dy1, args = args[0], args[1:]
956
- dy1 = dy1.reshape(-1, dy1.shape[-1])
957
- assert dy1.shape == x.shape
958
- else:
959
- dy1 = None
960
- if ctx.prenorm:
961
- dresidual = args[0]
962
- dresidual = dresidual.reshape(-1, dresidual.shape[-1])
963
- assert dresidual.shape == x.shape
964
- else:
965
- dresidual = None
966
- dx, dw, db, dresidual_in, dx1, dw1, db1, _ = _layer_norm_bwd(
967
- dy,
968
- x,
969
- weight,
970
- bias,
971
- ctx.eps,
972
- mean,
973
- rstd,
974
- dresidual,
975
- dy1,
976
- weight1,
977
- bias1,
978
- seeds,
979
- ctx.dropout_p,
980
- rowscale,
981
- ctx.has_residual,
982
- ctx.has_x1,
983
- ctx.zero_centered_weight,
984
- ctx.is_rms_norm,
985
- x_dtype=ctx.x_dtype,
986
- recompute_output=False,
987
- )
988
- return (
989
- dx.reshape(ctx.x_shape_og),
990
- dw,
991
- db,
992
- dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
993
- dx1.reshape(ctx.x_shape_og) if dx1 is not None else None,
994
- dw1,
995
- db1,
996
- None,
997
- None,
998
- None,
999
- None,
1000
- None,
1001
- None,
1002
- None,
1003
- None,
1004
- None,
1005
- None,
1006
- None,
1007
- )
1008
-
1009
-
1010
- def layer_norm_fn(
1011
- x,
1012
- weight,
1013
- bias,
1014
- residual=None,
1015
- x1=None,
1016
- weight1=None,
1017
- bias1=None,
1018
- eps=1e-6,
1019
- dropout_p=0.0,
1020
- rowscale=None,
1021
- prenorm=False,
1022
- residual_in_fp32=False,
1023
- zero_centered_weight=False,
1024
- is_rms_norm=False,
1025
- return_dropout_mask=False,
1026
- out_dtype=None,
1027
- out=None,
1028
- residual_out=None
1029
- ):
1030
- return LayerNormFn.apply(
1031
- x,
1032
- weight,
1033
- bias,
1034
- residual,
1035
- x1,
1036
- weight1,
1037
- bias1,
1038
- eps,
1039
- dropout_p,
1040
- rowscale,
1041
- prenorm,
1042
- residual_in_fp32,
1043
- zero_centered_weight,
1044
- is_rms_norm,
1045
- return_dropout_mask,
1046
- out_dtype,
1047
- out,
1048
- residual_out
1049
- )
1050
-
1051
-
1052
- def rms_norm_fn(
1053
- x,
1054
- weight,
1055
- bias,
1056
- residual=None,
1057
- x1=None,
1058
- weight1=None,
1059
- bias1=None,
1060
- eps=1e-6,
1061
- dropout_p=0.0,
1062
- rowscale=None,
1063
- prenorm=False,
1064
- residual_in_fp32=False,
1065
- zero_centered_weight=False,
1066
- return_dropout_mask=False,
1067
- out_dtype=None,
1068
- out=None,
1069
- residual_out=None
1070
- ):
1071
- return LayerNormFn.apply(
1072
- x,
1073
- weight,
1074
- bias,
1075
- residual,
1076
- x1,
1077
- weight1,
1078
- bias1,
1079
- eps,
1080
- dropout_p,
1081
- rowscale,
1082
- prenorm,
1083
- residual_in_fp32,
1084
- zero_centered_weight,
1085
- True,
1086
- return_dropout_mask,
1087
- out_dtype,
1088
- out,
1089
- residual_out
1090
- )
1091
-
1092
-
1093
- class RMSNorm(torch.nn.Module):
1094
-
1095
- def __init__(self, hidden_size, eps=1e-5, dropout_p=0.0, zero_centered_weight=False,
1096
- device=None, dtype=None):
1097
- factory_kwargs = {"device": device, "dtype": dtype}
1098
- super().__init__()
1099
- self.eps = eps
1100
- if dropout_p > 0.0:
1101
- self.drop = torch.nn.Dropout(dropout_p)
1102
- else:
1103
- self.drop = None
1104
- self.zero_centered_weight = zero_centered_weight
1105
- self.weight = torch.nn.Parameter(torch.empty(hidden_size, **factory_kwargs))
1106
- self.register_parameter("bias", None)
1107
- self.reset_parameters()
1108
-
1109
- def reset_parameters(self):
1110
- if not self.zero_centered_weight:
1111
- torch.nn.init.ones_(self.weight)
1112
- else:
1113
- torch.nn.init.zeros_(self.weight)
1114
-
1115
- def forward(self, x, residual=None, prenorm=False, residual_in_fp32=False):
1116
- return rms_norm_fn(
1117
- x,
1118
- self.weight,
1119
- self.bias,
1120
- residual=residual,
1121
- eps=self.eps,
1122
- dropout_p=self.drop.p if self.drop is not None and self.training else 0.0,
1123
- prenorm=prenorm,
1124
- residual_in_fp32=residual_in_fp32,
1125
- zero_centered_weight=self.zero_centered_weight,
1126
- )
1127
-
1128
-
1129
- class LayerNormLinearFn(torch.autograd.Function):
1130
-
1131
- @staticmethod
1132
- @custom_fwd
1133
- def forward(
1134
- ctx,
1135
- x,
1136
- norm_weight,
1137
- norm_bias,
1138
- linear_weight,
1139
- linear_bias,
1140
- residual=None,
1141
- eps=1e-6,
1142
- prenorm=False,
1143
- residual_in_fp32=False,
1144
- is_rms_norm=False,
1145
- ):
1146
- x_shape_og = x.shape
1147
- # reshape input data into 2D tensor
1148
- x = maybe_contiguous_lastdim(x.reshape(-1, x.shape[-1]))
1149
- if residual is not None:
1150
- assert residual.shape == x_shape_og
1151
- residual = maybe_contiguous_lastdim(residual.reshape(-1, residual.shape[-1]))
1152
- norm_weight = norm_weight.contiguous()
1153
- norm_bias = maybe_contiguous(norm_bias)
1154
- residual_dtype = (
1155
- residual.dtype
1156
- if residual is not None
1157
- else (torch.float32 if residual_in_fp32 else None)
1158
- )
1159
- y, _, mean, rstd, residual_out, *rest = _layer_norm_fwd(
1160
- x,
1161
- norm_weight,
1162
- norm_bias,
1163
- eps,
1164
- residual,
1165
- out_dtype=None if not torch.is_autocast_enabled() else torch.get_autocast_dtype("cuda"),
1166
- residual_dtype=residual_dtype,
1167
- is_rms_norm=is_rms_norm,
1168
- )
1169
- y = y.reshape(x_shape_og)
1170
- dtype = torch.get_autocast_dtype("cuda") if torch.is_autocast_enabled() else y.dtype
1171
- linear_weight = linear_weight.to(dtype)
1172
- linear_bias = linear_bias.to(dtype) if linear_bias is not None else None
1173
- out = F.linear(y.to(linear_weight.dtype), linear_weight, linear_bias)
1174
- # We don't store y, will be recomputed in the backward pass to save memory
1175
- ctx.save_for_backward(residual_out, norm_weight, norm_bias, linear_weight, mean, rstd)
1176
- ctx.x_shape_og = x_shape_og
1177
- ctx.eps = eps
1178
- ctx.is_rms_norm = is_rms_norm
1179
- ctx.has_residual = residual is not None
1180
- ctx.prenorm = prenorm
1181
- ctx.x_dtype = x.dtype
1182
- ctx.linear_bias_is_none = linear_bias is None
1183
- return out if not prenorm else (out, residual_out.reshape(x_shape_og))
1184
-
1185
- @staticmethod
1186
- @custom_bwd
1187
- def backward(ctx, dout, *args):
1188
- x, norm_weight, norm_bias, linear_weight, mean, rstd = ctx.saved_tensors
1189
- dout = dout.reshape(-1, dout.shape[-1])
1190
- dy = F.linear(dout, linear_weight.t())
1191
- dlinear_bias = None if ctx.linear_bias_is_none else dout.sum(0)
1192
- dy = maybe_contiguous_lastdim(dy)
1193
- assert dy.shape == x.shape
1194
- if ctx.prenorm:
1195
- dresidual = args[0]
1196
- dresidual = maybe_contiguous_lastdim(dresidual.reshape(-1, dresidual.shape[-1]))
1197
- assert dresidual.shape == x.shape
1198
- else:
1199
- dresidual = None
1200
- dx, dnorm_weight, dnorm_bias, dresidual_in, _, _, _, y = _layer_norm_bwd(
1201
- dy,
1202
- x,
1203
- norm_weight,
1204
- norm_bias,
1205
- ctx.eps,
1206
- mean,
1207
- rstd,
1208
- dresidual=dresidual,
1209
- has_residual=ctx.has_residual,
1210
- is_rms_norm=ctx.is_rms_norm,
1211
- x_dtype=ctx.x_dtype,
1212
- recompute_output=True,
1213
- )
1214
- dlinear_weight = torch.einsum("bo,bi->oi", dout, y)
1215
- return (
1216
- dx.reshape(ctx.x_shape_og),
1217
- dnorm_weight,
1218
- dnorm_bias,
1219
- dlinear_weight,
1220
- dlinear_bias,
1221
- dresidual_in.reshape(ctx.x_shape_og) if ctx.has_residual else None,
1222
- None,
1223
- None,
1224
- None,
1225
- None,
1226
- )
1227
-
1228
-
1229
- def layer_norm_linear_fn(
1230
- x,
1231
- norm_weight,
1232
- norm_bias,
1233
- linear_weight,
1234
- linear_bias,
1235
- residual=None,
1236
- eps=1e-6,
1237
- prenorm=False,
1238
- residual_in_fp32=False,
1239
- is_rms_norm=False,
1240
- ):
1241
- return LayerNormLinearFn.apply(
1242
- x,
1243
- norm_weight,
1244
- norm_bias,
1245
- linear_weight,
1246
- linear_bias,
1247
- residual,
1248
- eps,
1249
- prenorm,
1250
- residual_in_fp32,
1251
- is_rms_norm,
1252
- )
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/linear.py DELETED
@@ -1,594 +0,0 @@
1
- # Adapted from https://github.com/ELS-RD/kernl/blob/main/src/kernl/implementations/linear_layer.py
2
- # and https://github.com/openai/triton/blob/master/python/triton/ops/matmul.py
3
- from typing import Optional
4
-
5
- import torch
6
- import triton
7
- import triton.language as tl
8
- from triton.ops.matmul_perf_model import early_config_prune, estimate_matmul_time
9
-
10
- from flash_attn.ops.triton.k_activations import (
11
- gelu,
12
- gelu_approx,
13
- gelu_approx_grad,
14
- gelu_grad,
15
- squared_relu,
16
- squared_relu_grad,
17
- )
18
-
19
- # CREDITS: Initially inspired by the Triton tutorial on matrix multiplications
20
-
21
-
22
- def init_to_zero(name):
23
- return lambda nargs: nargs[name].zero_()
24
-
25
-
26
- def get_configs_io_bound():
27
- configs = []
28
- for num_stages in [2, 3, 4, 5, 6]:
29
- for block_m in [16, 32]:
30
- for block_k in [32, 64]:
31
- for block_n in [32, 64, 128, 256]:
32
- num_warps = 2 if block_n <= 64 else 4
33
- configs.append(
34
- triton.Config(
35
- {
36
- "BLOCK_M": block_m,
37
- "BLOCK_N": block_n,
38
- "BLOCK_K": block_k,
39
- "SPLIT_K": 1,
40
- },
41
- num_stages=num_stages,
42
- num_warps=num_warps,
43
- )
44
- )
45
- # split_k not used
46
- # for split_k in [2, 4, 8, 16]:
47
- # configs.append(triton.Config(
48
- # {'BLOCK_M': block_m, 'BLOCK_N': block_n, 'BLOCK_K': block_k, 'SPLIT_K': split_k},
49
- # num_stages=num_stages, num_warps=num_warps, pre_hook=init_to_zero('C')))
50
- return configs
51
-
52
-
53
- @triton.autotune(
54
- configs=[
55
- triton.Config(
56
- {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8
57
- ),
58
- triton.Config(
59
- {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8
60
- ),
61
- triton.Config(
62
- {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
63
- ),
64
- triton.Config(
65
- {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
66
- ),
67
- triton.Config(
68
- {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
69
- ),
70
- triton.Config(
71
- {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
72
- ),
73
- triton.Config(
74
- {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
75
- ),
76
- triton.Config(
77
- {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
78
- ),
79
- triton.Config(
80
- {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2
81
- ),
82
- # good for int8
83
- triton.Config(
84
- {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1},
85
- num_stages=3,
86
- num_warps=8,
87
- ),
88
- triton.Config(
89
- {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1},
90
- num_stages=3,
91
- num_warps=8,
92
- ),
93
- triton.Config(
94
- {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4
95
- ),
96
- triton.Config(
97
- {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4
98
- ),
99
- triton.Config(
100
- {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1},
101
- num_stages=4,
102
- num_warps=4,
103
- ),
104
- triton.Config(
105
- {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
106
- ),
107
- triton.Config(
108
- {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
109
- ),
110
- triton.Config(
111
- {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
112
- ),
113
- triton.Config(
114
- {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2
115
- ),
116
- ]
117
- + get_configs_io_bound(),
118
- key=["CACHE_KEY_M", "CACHE_KEY_N", "CACHE_KEY_K"],
119
- prune_configs_by={
120
- "early_config_prune": early_config_prune,
121
- "perf_model": estimate_matmul_time,
122
- "top_k": 10,
123
- },
124
- )
125
- @triton.heuristics(
126
- {
127
- "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
128
- }
129
- )
130
- @triton.jit
131
- def kernel_fwd(
132
- C, # Pointers to matrices
133
- ACT_INPUT,
134
- A,
135
- B,
136
- bias,
137
- # Matrix dimensions
138
- M,
139
- N,
140
- K,
141
- CACHE_KEY_M,
142
- CACHE_KEY_N,
143
- CACHE_KEY_K,
144
- # The stride variables represent how much to increase the ptr by when moving by 1
145
- # element in a particular dimension. E.g. stride_am is how much to increase a_ptr
146
- # by to get the element one row down (A has M rows)
147
- stride_cm,
148
- # stride_cn, # Assume that stride_cn == 1
149
- stride_am,
150
- stride_ak,
151
- stride_bn,
152
- stride_bk,
153
- # Meta-parameters
154
- BLOCK_M: tl.constexpr,
155
- GROUP_M: tl.constexpr,
156
- BLOCK_N: tl.constexpr,
157
- BLOCK_K: tl.constexpr,
158
- # split k not used, not performant with activation, kept because early_config_prune is expecting it
159
- SPLIT_K: tl.constexpr,
160
- EVEN_K: tl.constexpr,
161
- A_ROWMAJOR: tl.constexpr,
162
- B_COLMAJOR: tl.constexpr,
163
- BIAS: tl.constexpr,
164
- SAVE_ACT_INPUT: tl.constexpr,
165
- ACTIVATION: tl.constexpr,
166
- ):
167
-
168
- """
169
- Kernel for computing Out = activation(A x W + C)
170
- - Input has shape (M, K)
171
- - Weight has shape (K, N)
172
- - Bias has shape (N,)
173
- - Output has shape (M, N)
174
- - ActInputs (optional) has shape (M, N)
175
- 'ActInputs' optionally saves the A x W + C intermediate for backward computations
176
- This kernel will consolidate over K
177
- """
178
-
179
- pid = tl.program_id(axis=0)
180
-
181
- grid_m = (M + BLOCK_M - 1) // BLOCK_M
182
- grid_n = (N + BLOCK_N - 1) // BLOCK_N
183
- # re-order program ID for better L2 performance
184
- width = GROUP_M * grid_n
185
- group_id = pid // width
186
- group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
187
- pid_m = group_id * GROUP_M + (pid % group_size)
188
- pid_n = (pid % width) // (group_size)
189
-
190
- # now compute the block that each program will go through
191
- # rm (resp. rn) denotes a range of indices
192
- # for rows (resp. col) of C
193
- rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
194
- rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
195
- # trick to avoid masking on M and N axis
196
- ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
197
- rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
198
- rk = tl.arange(0, BLOCK_K)
199
-
200
- if A_ROWMAJOR:
201
- A = A + (ram[:, None] * stride_am + rk[None, :])
202
- else:
203
- A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
204
- if B_COLMAJOR:
205
- B = B + (rk[:, None] + rbn[None, :] * stride_bn)
206
- else:
207
- B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
208
-
209
- acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
210
-
211
- for k in range(K, 0, -BLOCK_K):
212
- if EVEN_K:
213
- a = tl.load(A)
214
- b = tl.load(B)
215
- else:
216
- a = tl.load(A, mask=rk[None, :] < k, other=0.0)
217
- b = tl.load(B, mask=rk[:, None] < k, other=0.0)
218
- acc += tl.dot(a, b)
219
-
220
- if A_ROWMAJOR:
221
- A += BLOCK_K
222
- else:
223
- A += BLOCK_K * stride_ak
224
- if B_COLMAJOR:
225
- B += BLOCK_K
226
- else:
227
- B += BLOCK_K * stride_bk
228
-
229
- # Putting bias after the matmul (instead of before) is faster, idk why
230
- if BIAS:
231
- bias = tl.load(bias + rn, mask=rn < N, other=0.0).to(tl.float32)
232
- acc += bias[None, :]
233
-
234
- # optional: save the activation inputs
235
- if SAVE_ACT_INPUT:
236
- # act_in_ptrs = ACT_INPUT + ram[:, None] * stride_cm + rbn[None, :] * stride_cn
237
- act_in_ptrs = ACT_INPUT + ram[:, None] * stride_cm + rbn[None, :]
238
- tl.store(act_in_ptrs, acc)
239
-
240
- # optional: fused activation (while the data is in shared memory)
241
- if ACTIVATION == "gelu":
242
- acc = gelu(acc)
243
- elif ACTIVATION == "gelu_approx":
244
- acc = gelu_approx(acc)
245
- elif ACTIVATION == "squared_relu":
246
- acc = squared_relu(acc)
247
- # rematerialize rm and rn to save registers
248
- rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
249
- rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
250
-
251
- # write back result
252
- # C = C + rm[:, None] * stride_cm + rn[None, :] * stride_cn
253
- C = C + rm[:, None] * stride_cm + rn[None, :]
254
- mask = (rm < M)[:, None] & (rn < N)[None, :]
255
- tl.store(C, acc)
256
-
257
-
258
- def triton_linear_act(
259
- x: torch.Tensor,
260
- weight: torch.Tensor,
261
- bias: Optional[torch.Tensor] = None,
262
- activation: str = "id",
263
- save_act_input: bool = False,
264
- ) -> torch.Tensor:
265
- """
266
- Compute e = activation(x @ weight.T + bias).
267
- This wrapper kicks the `kernel_fwd` Triton kernel
268
- :param x: input tensor
269
- :param weight: weight matrix
270
- :param bias: an optional bias tensor
271
- :param activation: Activation name. Needs to be a Triton kernel.
272
- :param act_input: an optional tensor to save the activation inputs (for backward)
273
- :return: result tensor
274
- """
275
- # if torch.is_autocast_enabled():
276
- # dtype = torch.get_autocast_gpu_dtype()
277
- # x, weight, bias = [a.to(dtype=dtype) for a in [x, weight, bias]]
278
-
279
- assert activation in ["id", "gelu", "gelu_approx", "squared_relu"]
280
-
281
- batch_shape, n = x.shape[:-1], x.shape[-1]
282
- batch_dim = batch_shape.numel()
283
- x_reshaped = x.reshape(batch_dim, n)
284
-
285
- if x_reshaped.stride(0) > 1 and x_reshaped.stride(1) > 1:
286
- x_reshaped = x_reshaped.contiguous()
287
- if weight.stride(0) > 1 and weight.stride(1) > 1:
288
- weight = weight.contiguous()
289
- bias = bias.contiguous() if bias is not None else None
290
-
291
- assert (
292
- x.dtype == weight.dtype
293
- ), f"Input and weight must have the same dtype, got {x.dtype} and {weight.dtype}"
294
- if bias is not None:
295
- assert (
296
- x.dtype == bias.dtype
297
- ), f"Input and bias must have the same dtype, got {x.dtype} and {bias.dtype}"
298
- assert (
299
- x_reshaped.shape[1] == weight.shape[1]
300
- ), f"Incompatible dimensions: {x_reshaped.shape} - {weight.shape}"
301
-
302
- assert (
303
- bias is None or bias.shape[0] == weight.shape[0]
304
- ), "Incompatible dimensions between weight and bias"
305
-
306
- M, K = x_reshaped.shape
307
- N, K = weight.shape
308
-
309
- output = torch.empty((M, N), device=x.device, dtype=x.dtype)
310
- act_input = torch.empty_like(output) if save_act_input else None
311
-
312
- # 1D launch kernel where each block gets its own program.
313
- grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) # noqa
314
-
315
- kernel_fwd[grid](
316
- output,
317
- act_input,
318
- x_reshaped,
319
- weight, # data ptrs
320
- bias if bias is not None else x, # auto skip bias if not present
321
- M, # shapes
322
- N,
323
- K,
324
- M // 32, # key for triton cache (limit number of compilations)
325
- N // 32,
326
- K // 32,
327
- stride_cm=output.stride(0), # strides
328
- # stride_cn=output.stride(1),
329
- stride_am=x_reshaped.stride(0),
330
- stride_ak=x_reshaped.stride(1),
331
- stride_bk=weight.stride(1),
332
- stride_bn=weight.stride(0),
333
- BIAS=bias is not None, # optional fused bias
334
- SAVE_ACT_INPUT=save_act_input, # optional save activation inputs
335
- ACTIVATION=activation, # optional fused activation
336
- A_ROWMAJOR=x_reshaped.stride(1) == 1,
337
- B_COLMAJOR=weight.stride(1) == 1,
338
- GROUP_M=8, # speed optimization: group the programs
339
- )
340
-
341
- if not save_act_input:
342
- return output.reshape(*batch_shape, output.shape[-1])
343
- else:
344
- return (
345
- output.reshape(*batch_shape, output.shape[-1]),
346
- act_input.reshape(*batch_shape, act_input.shape[-1]),
347
- )
348
-
349
-
350
- @triton.autotune(
351
- configs=[
352
- triton.Config(
353
- {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8
354
- ),
355
- triton.Config(
356
- {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=3, num_warps=8
357
- ),
358
- triton.Config(
359
- {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
360
- ),
361
- triton.Config(
362
- {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
363
- ),
364
- triton.Config(
365
- {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
366
- ),
367
- triton.Config(
368
- {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
369
- ),
370
- triton.Config(
371
- {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
372
- ),
373
- triton.Config(
374
- {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=4, num_warps=4
375
- ),
376
- triton.Config(
377
- {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 32, "SPLIT_K": 1}, num_stages=5, num_warps=2
378
- ),
379
- # good for int8
380
- triton.Config(
381
- {"BLOCK_M": 128, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1},
382
- num_stages=3,
383
- num_warps=8,
384
- ),
385
- triton.Config(
386
- {"BLOCK_M": 256, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1},
387
- num_stages=3,
388
- num_warps=8,
389
- ),
390
- triton.Config(
391
- {"BLOCK_M": 256, "BLOCK_N": 64, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4
392
- ),
393
- triton.Config(
394
- {"BLOCK_M": 64, "BLOCK_N": 256, "BLOCK_K": 128, "SPLIT_K": 1}, num_stages=4, num_warps=4
395
- ),
396
- triton.Config(
397
- {"BLOCK_M": 128, "BLOCK_N": 128, "BLOCK_K": 128, "SPLIT_K": 1},
398
- num_stages=4,
399
- num_warps=4,
400
- ),
401
- triton.Config(
402
- {"BLOCK_M": 128, "BLOCK_N": 64, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
403
- ),
404
- triton.Config(
405
- {"BLOCK_M": 64, "BLOCK_N": 128, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
406
- ),
407
- triton.Config(
408
- {"BLOCK_M": 128, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=4, num_warps=4
409
- ),
410
- triton.Config(
411
- {"BLOCK_M": 64, "BLOCK_N": 32, "BLOCK_K": 64, "SPLIT_K": 1}, num_stages=5, num_warps=2
412
- ),
413
- ]
414
- + get_configs_io_bound(),
415
- key=["CACHE_KEY_M", "CACHE_KEY_N", "CACHE_KEY_K"],
416
- prune_configs_by={
417
- "early_config_prune": early_config_prune,
418
- "perf_model": estimate_matmul_time,
419
- "top_k": 10,
420
- },
421
- )
422
- @triton.heuristics(
423
- {
424
- "EVEN_K": lambda args: args["K"] % (args["BLOCK_K"] * args["SPLIT_K"]) == 0,
425
- }
426
- )
427
- @triton.jit
428
- def kernel_bwd(
429
- C, # Pointers to matrices
430
- ACT_INPUT,
431
- A,
432
- B,
433
- # Matrix dimensions
434
- M,
435
- N,
436
- K,
437
- CACHE_KEY_M,
438
- CACHE_KEY_N,
439
- CACHE_KEY_K,
440
- # The stride variables represent how much to increase the ptr by when moving by 1
441
- # element in a particular dimension. E.g. stride_am is how much to increase a_ptr
442
- # by to get the element one row down (A has M rows)
443
- stride_cm,
444
- # stride_cn, # Assume that stride_cn == 1
445
- stride_am,
446
- stride_ak,
447
- stride_bk,
448
- stride_bn,
449
- # Meta-parameters
450
- BLOCK_M: tl.constexpr,
451
- GROUP_M: tl.constexpr,
452
- BLOCK_N: tl.constexpr,
453
- BLOCK_K: tl.constexpr,
454
- # split k not used, not performant with activation, kept because early_config_prune is expecting it
455
- SPLIT_K: tl.constexpr,
456
- EVEN_K: tl.constexpr,
457
- ACTIVATION: tl.constexpr,
458
- ):
459
-
460
- """
461
- Kernel for computing Out = (A @ B) * activation_grad(ActInputs), i.e. the input-gradient matmul fused with the activation gradient
462
- - A (the incoming output gradient) has shape (M, K)
463
- - B (the weight) has shape (K, N)
464
- - Out (the input gradient) has shape (M, N)
465
- - ActInputs has shape (M, N) and holds the pre-activations saved by the forward pass
466
- 'ActInputs' is only read when ACTIVATION != "id"
467
- This kernel accumulates over K
468
- """
469
-
470
- pid = tl.program_id(axis=0)
471
-
472
- grid_m = (M + BLOCK_M - 1) // BLOCK_M
473
- grid_n = (N + BLOCK_N - 1) // BLOCK_N
474
- # re-order program ID for better L2 performance
475
- width = GROUP_M * grid_n
476
- group_id = pid // width
477
- group_size = min(grid_m - group_id * GROUP_M, GROUP_M)
478
- pid_m = group_id * GROUP_M + (pid % group_size)
479
- pid_n = (pid % width) // (group_size)
480
-
481
- # now compute the block that each program will go through
482
- # rm (resp. rn) denotes a range of indices
483
- # for rows (resp. col) of C
484
- rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
485
- rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
486
- # trick to avoid masking on M and N axis
487
- ram = tl.max_contiguous(tl.multiple_of(rm % M, BLOCK_M), BLOCK_M)
488
- rbn = tl.max_contiguous(tl.multiple_of(rn % N, BLOCK_N), BLOCK_N)
489
- rk = tl.arange(0, BLOCK_K)
490
-
491
- A = A + (ram[:, None] * stride_am + rk[None, :] * stride_ak)
492
- B = B + (rk[:, None] * stride_bk + rbn[None, :] * stride_bn)
493
-
494
- acc = tl.zeros((BLOCK_M, BLOCK_N), dtype=tl.float32)
495
-
496
- for k in range(K, 0, -BLOCK_K):
497
- if EVEN_K:
498
- a = tl.load(A)
499
- b = tl.load(B)
500
- else:
501
- a = tl.load(A, mask=rk[None, :] < k, other=0.0)
502
- b = tl.load(B, mask=rk[:, None] < k, other=0.0)
503
- acc += tl.dot(a, b)
504
-
505
- A += BLOCK_K * stride_ak
506
- B += BLOCK_K * stride_bk
507
-
508
- # optional: fused activation (while the data is in shared memory)
509
- if ACTIVATION != "id":
510
- act_in_ptrs = ACT_INPUT + ram[:, None] * stride_cm + rbn[None, :]
511
- act_input = tl.load(act_in_ptrs).to(acc.dtype)
512
- if ACTIVATION == "gelu":
513
- acc *= gelu_grad(act_input)
514
- elif ACTIVATION == "gelu_approx":
515
- acc *= gelu_approx_grad(act_input)
516
- elif ACTIVATION == "squared_relu":
517
- acc *= squared_relu_grad(act_input)
518
-
519
- # rematerialize rm and rn to save registers
520
- rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
521
- rn = pid_n * BLOCK_N + tl.arange(0, BLOCK_N)
522
-
523
- # write back result
524
- C = C + rm[:, None] * stride_cm + rn[None, :]
525
- mask = (rm < M)[:, None] & (rn < N)[None, :]
526
- tl.store(C, acc, mask=mask)
527
-
528
-
529
- def triton_dgrad_act(
530
- grad_output: torch.Tensor,
531
- weight: torch.Tensor,
532
- activation: str = "id",
533
- act_input: Optional[torch.Tensor] = None,
534
- ) -> torch.Tensor:
535
- """
536
- Compute grad_input = (grad_output @ weight) * activation_grad(act_input).
537
- This wrapper launches the `kernel_bwd` Triton kernel
538
- :param grad_output: gradient of the layer output
539
- :param weight: weight matrix of the forward linear layer
540
- :param activation: activation name; must be one of the supported fused activations
541
- :param act_input: the pre-activation tensor saved during the forward pass (required unless activation is "id")
542
- :return: gradient with respect to the layer input
543
- """
544
- assert activation in ["id", "gelu", "gelu_approx", "squared_relu"]
545
-
546
- batch_shape, n = grad_output.shape[:-1], grad_output.shape[-1]
547
- batch_dim = batch_shape.numel()
548
- grad_output_reshaped = grad_output.reshape(batch_dim, n)
549
-
550
- if grad_output_reshaped.stride(0) > 1 and grad_output_reshaped.stride(1) > 1:
551
- grad_output_reshaped = grad_output_reshaped.contiguous()
552
- if weight.stride(0) > 1 and weight.stride(1) > 1:
553
- weight = weight.contiguous()
554
-
555
- assert (
556
- grad_output.dtype == weight.dtype
557
- ), f"grad_output and weight must have the same dtype, got {grad_output.dtype} and {weight.dtype}"
558
- assert (
559
- grad_output_reshaped.shape[1] == weight.shape[0]
560
- ), f"Incompatible dimensions: {grad_output_reshaped.shape} - {weight.shape}"
561
- if activation != "id":
562
- assert act_input is not None, f"act_input is required for activation {activation}"
563
-
564
- # M, N, K in bwd are different from M, N, K in fwd
565
- M, K = grad_output_reshaped.shape
566
- K, N = weight.shape
567
-
568
- grad_input = torch.empty((M, N), device=grad_output.device, dtype=grad_output.dtype)
569
-
570
- # 1D launch kernel where each block gets its own program.
571
- grid = lambda META: (triton.cdiv(M, META["BLOCK_M"]) * triton.cdiv(N, META["BLOCK_N"]),) # noqa
572
-
573
- kernel_bwd[grid](
574
- grad_input,
575
- act_input,
576
- grad_output_reshaped,
577
- weight, # data ptrs
578
- M, # shapes
579
- N,
580
- K,
581
- M // 32, # key for triton cache (limit number of compilations)
582
- N // 32,
583
- K // 32,
584
- stride_cm=grad_input.stride(0), # strides
585
- # stride_cn=grad_input.stride(1),
586
- stride_am=grad_output_reshaped.stride(0),
587
- stride_ak=grad_output_reshaped.stride(1),
588
- stride_bk=weight.stride(0),
589
- stride_bn=weight.stride(1),
590
- ACTIVATION=activation, # optional fused activation
591
- GROUP_M=8, # speed optimization: group the programs
592
- )
593
-
594
- return grad_input.reshape(*batch_shape, grad_input.shape[-1])
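
For reference, the removed `linear.py` above provides `triton_linear_act`, which fuses a linear layer, optional bias, and activation into one Triton kernel, and `triton_dgrad_act`, which fuses the matching input-gradient matmul with the activation gradient. A minimal usage sketch (illustrative only; it assumes an older checkout that still ships `flash_attn/ops/triton/linear.py`, plus a CUDA device with Triton installed):

```python
import torch
from flash_attn.ops.triton.linear import triton_linear_act, triton_dgrad_act

batch, d_model, d_hidden = 8, 1024, 4096
x = torch.randn(batch, d_model, device="cuda", dtype=torch.float16)
w1 = torch.randn(d_hidden, d_model, device="cuda", dtype=torch.float16)  # fc1 weight (out, in)
b1 = torch.randn(d_hidden, device="cuda", dtype=torch.float16)
w2 = torch.randn(d_model, d_hidden, device="cuda", dtype=torch.float16)  # fc2 weight (out, in)

# fc1 forward fused with squared ReLU; keep the pre-activation for the backward pass.
out1, pre1 = triton_linear_act(x, w1, b1, activation="squared_relu", save_act_input=True)
out2 = out1 @ w2.t()  # plain second linear, no bias, for brevity

# Backward: fuse fc2's input-gradient matmul with fc1's activation gradient,
# i.e. d_pre1 = (d_out2 @ w2) * squared_relu'(pre1).
d_out2 = torch.randn_like(out2)
d_pre1 = triton_dgrad_act(d_out2, w2, activation="squared_relu", act_input=pre1)
```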
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/mlp.py DELETED
@@ -1,149 +0,0 @@
1
- # The Triton fused matmul + squared ReLU is faster for fp16 but slower for bf16, compared
2
- # to the naive implementation.
3
- import fused_dense_lib as fused_dense_cuda
4
- import torch
5
- import torch.nn as nn
6
- import torch.nn.functional as F
7
-
8
- from flash_attn.utils.torch import custom_fwd, custom_bwd
9
- from flash_attn.ops.activations import sqrelu_bwd, sqrelu_fwd
10
- from flash_attn.ops.triton.linear import triton_dgrad_act, triton_linear_act
11
-
12
-
13
- class FusedDenseSqreluDenseFunc(torch.autograd.Function):
14
- @staticmethod
15
- @custom_fwd
16
- def forward(ctx, x, weight1, bias1, weight2, bias2, checkpoint_lvl=0):
17
- """checkpoint_lvl:
18
- 0: no recomputation in the bwd
19
- 1: recompute the squared-ReLU output in the bwd
20
- 2: recompute act_input and the squared-ReLU output in the bwd
21
- """
22
- if torch.is_autocast_enabled():
23
- dtype = torch.get_autocast_gpu_dtype()
24
- x, weight1, bias1, weight2, bias2 = [
25
- a.to(dtype=dtype) for a in [x, weight1, bias1, weight2, bias2]
26
- ]
27
- is_bf16 = x.dtype == torch.bfloat16
28
- assert checkpoint_lvl in [0, 1, 2]
29
- x = x.contiguous()
30
- weight1 = weight1.contiguous()
31
- bias1 = bias1.contiguous()
32
- weight2 = weight2.contiguous()
33
- bias2 = bias2.contiguous()
34
- batch_shape, n = x.shape[:-1], x.shape[-1]
35
- batch_dim = batch_shape.numel()
36
- if is_bf16:
37
- act_input = fused_dense_cuda.linear_bias_forward(
38
- x.reshape(batch_dim, n), weight1, bias1
39
- )
40
- output1 = sqrelu_fwd(act_input)
41
- else:
42
- save_act_input = checkpoint_lvl != 2
43
- result = triton_linear_act(
44
- x.reshape(batch_dim, n),
45
- weight1,
46
- bias1,
47
- activation="squared_relu",
48
- save_act_input=save_act_input,
49
- )
50
- if save_act_input:
51
- output1, act_input = result
52
- else:
53
- output1 = result
54
- output2 = fused_dense_cuda.linear_bias_forward(output1, weight2, bias2)
55
- ctx.checkpoint_lvl = checkpoint_lvl
56
- if checkpoint_lvl == 0:
57
- ctx.save_for_backward(x, weight1, bias1, weight2, act_input, output1)
58
- elif checkpoint_lvl == 1:
59
- ctx.save_for_backward(x, weight1, bias1, weight2, act_input)
60
- elif checkpoint_lvl == 2:
61
- ctx.save_for_backward(x, weight1, bias1, weight2)
62
- return output2.reshape(*batch_shape, output2.shape[-1])
63
-
64
- @staticmethod
65
- @custom_bwd
66
- def backward(ctx, grad_output):
67
- grad_output = grad_output.contiguous()
68
- checkpoint_lvl = ctx.checkpoint_lvl
69
- x, weight1, bias1, weight2, *rest = ctx.saved_tensors
70
- batch_shape, n = x.shape[:-1], x.shape[-1]
71
- batch_dim = batch_shape.numel()
72
- is_bf16 = x.dtype == torch.bfloat16
73
- if checkpoint_lvl == 0:
74
- act_input, output1 = rest
75
- elif checkpoint_lvl == 1:
76
- (act_input,) = rest
77
- output1 = sqrelu_fwd(act_input)
78
- elif checkpoint_lvl == 2:
79
- if is_bf16:
80
- act_input = fused_dense_cuda.linear_bias_forward(
81
- x.reshape(batch_dim, n), weight1, bias1
82
- )
83
- output1 = sqrelu_fwd(act_input)
84
- else:
85
- output1, act_input = triton_linear_act(
86
- x.reshape(batch_dim, n),
87
- weight1,
88
- bias1,
89
- activation="squared_relu",
90
- save_act_input=True,
91
- )
92
-
93
- if is_bf16:
94
- grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
95
- grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(output1, grad_output)
96
- grad_output1 = grad_output @ weight2
97
- grad_act_input = sqrelu_bwd(grad_output1, act_input)
98
- grad_input, grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_backward(
99
- x.reshape(batch_dim, n), weight1, grad_act_input
100
- )
101
- else:
102
- grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
103
- grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(output1, grad_output)
104
- grad_act_input = triton_dgrad_act(
105
- grad_output, weight2, activation="squared_relu", act_input=act_input
106
- )
107
- grad_input, grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_backward(
108
- x.reshape(batch_dim, n), weight1, grad_act_input
109
- )
110
- return grad_input.reshape_as(x), grad_weight1, grad_bias1, grad_weight2, grad_bias2, None
111
-
112
-
113
- fused_dense_sqrelu_dense_function = FusedDenseSqreluDenseFunc.apply
114
-
115
-
116
- class FusedDenseSqreluDense(nn.Module):
117
- def __init__(
118
- self,
119
- in_features,
120
- hidden_features=None,
121
- out_features=None,
122
- bias1=True,
123
- bias2=True,
124
- checkpoint_lvl=0,
125
- device=None,
126
- dtype=None,
127
- ):
128
- """
129
- checkpoint_lvl (increasing lvl means slower but more memory saving):
130
- 0: no recomputation in the bwd
131
- 1: recompute the squared-ReLU output in the bwd
132
- 2: recompute the pre-activation and the squared-ReLU output in the bwd
133
- """
134
- assert checkpoint_lvl in [0, 1, 2]
135
- factory_kwargs = {"device": device, "dtype": dtype}
136
- super().__init__()
137
- out_features = out_features or in_features
138
- hidden_features = hidden_features or in_features * 4
139
- assert bias1 == True, "DenseSqreluDense module without bias is currently not supported"
140
- assert bias2 == True, "DenseSqreluDense module without bias is currently not supported"
141
- self.checkpoint_lvl = checkpoint_lvl
142
- self.fc1 = nn.Linear(in_features, hidden_features, bias=bias1, **factory_kwargs)
143
- self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
144
-
145
- def forward(self, x):
146
- assert x.is_cuda
147
- return fused_dense_sqrelu_dense_function(
148
- x, self.fc1.weight, self.fc1.bias, self.fc2.weight, self.fc2.bias, self.checkpoint_lvl
149
- )
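
For reference, `FusedDenseSqreluDense` above is a two-layer MLP whose fp16 path routes through the Triton fused linear + squared-ReLU kernels. A minimal sketch (illustrative only; it needs a CUDA device, Triton, and the `fused_dense_lib` CUDA extension imported at the top of this file):

```python
import torch
from flash_attn.ops.triton.mlp import FusedDenseSqreluDense

mlp = FusedDenseSqreluDense(
    in_features=1024, hidden_features=4096, checkpoint_lvl=1,
    device="cuda", dtype=torch.float16,
)
x = torch.randn(2, 512, 1024, device="cuda", dtype=torch.float16, requires_grad=True)
y = mlp(x)          # (2, 512, 1024); forward runs FusedDenseSqreluDenseFunc
y.sum().backward()  # backward recomputes the squared-ReLU output (checkpoint_lvl=1)
```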
 
build/torch26-cxx98-cu124-x86_64-linux/flash_attn/ops/triton/rotary.py DELETED
@@ -1,185 +0,0 @@
1
- # Copyright (c) 2025, Tri Dao.
2
- # As of 2025-04-23, we require triton >= 3.0
3
-
4
- from typing import Optional, Union
5
-
6
- import torch
7
-
8
- import triton
9
- import triton.language as tl
10
-
11
-
12
- @triton.jit
13
- def rotary_kernel(
14
- OUT, # Pointers to matrices
15
- X,
16
- COS,
17
- SIN,
18
- CU_SEQLENS,
19
- SEQLEN_OFFSETS, # this could be int or a pointer
20
- # Matrix dimensions
21
- seqlen,
22
- nheads,
23
- seqlen_ro,
24
- # strides
25
- stride_out_batch,
26
- stride_out_seqlen,
27
- stride_out_nheads,
28
- stride_out_headdim,
29
- stride_x_batch,
30
- stride_x_seqlen,
31
- stride_x_nheads,
32
- stride_x_headdim,
33
- # Meta-parameters
34
- # We want ROTARY_DIM to be constexpr, otherwise the triton compiler doesn't know that
35
- # the mask is constant every 8 elements, and it will generate LDG.16 instead of LDG.128
36
- ROTARY_DIM: tl.constexpr,
37
- IS_SEQLEN_OFFSETS_TENSOR: tl.constexpr,
38
- IS_VARLEN: tl.constexpr,
39
- INTERLEAVED: tl.constexpr,
40
- CONJUGATE: tl.constexpr,
41
- BLOCK_H: tl.constexpr,
42
- BLOCK_M: tl.constexpr,
43
- ):
44
- BLOCK_K: tl.constexpr = triton.next_power_of_2(ROTARY_DIM)
45
- ROTARY_DIM_HALF = ROTARY_DIM // 2
46
- pid_head = tl.program_id(axis=0)
47
- pid_m = tl.program_id(axis=1)
48
- pid_batch = tl.program_id(axis=2)
49
-
50
- if not IS_VARLEN:
51
- X = X + pid_batch * stride_x_batch
52
- OUT = OUT + pid_batch * stride_out_batch
53
- else:
54
- start_idx = tl.load(CU_SEQLENS + pid_batch)
55
- seqlen = tl.load(CU_SEQLENS + pid_batch + 1) - start_idx
56
- X = X + start_idx * stride_x_seqlen
57
- OUT = OUT + start_idx * stride_out_seqlen
58
-
59
- if pid_m * BLOCK_M >= seqlen:
60
- return
61
-
62
- rh = pid_head * BLOCK_H + tl.arange(0, BLOCK_H)
63
- rm = pid_m * BLOCK_M + tl.arange(0, BLOCK_M)
64
- if not IS_SEQLEN_OFFSETS_TENSOR:
65
- rm_cs = rm + SEQLEN_OFFSETS
66
- else:
67
- rm_cs = rm + tl.load(SEQLEN_OFFSETS + pid_batch)
68
-
69
- rk_half = tl.arange(0, BLOCK_K // 2)
70
- COS = COS + (rm_cs[:, None] * ROTARY_DIM_HALF + rk_half[None, :])
71
- SIN = SIN + (rm_cs[:, None] * ROTARY_DIM_HALF + rk_half[None, :])
72
- mask_cs = (rm_cs[:, None] < seqlen_ro) & (rk_half[None, :] < ROTARY_DIM_HALF)
73
- cos = tl.load(COS, mask=mask_cs, other=1.0).to(tl.float32)
74
- sin = tl.load(SIN, mask=mask_cs, other=0.0).to(tl.float32)
75
- if CONJUGATE:
76
- sin = -sin
77
-
78
- if not INTERLEAVED:
79
- # Load the 1st and 2nd halves of X, do calculation, then store to 1st and 2nd halves of OUT
80
- X = X + (rh[:, None, None] * stride_x_nheads + rm[None, :, None] * stride_x_seqlen + rk_half[None, None, :] * stride_x_headdim)
81
- OUT = OUT + (rh[:, None, None] * stride_out_nheads + rm[None, :, None] * stride_out_seqlen + rk_half[None, None, :] * stride_out_headdim)
82
- mask = (rh[:, None, None] < nheads) & (rm[None, :, None] < seqlen) & (rk_half[None, None, :] < ROTARY_DIM_HALF)
83
- x0 = tl.load(X, mask=mask, other=0.0).to(tl.float32)
84
- x1 = tl.load(X + ROTARY_DIM_HALF * stride_x_headdim, mask=mask, other=0.0,).to(tl.float32)
85
- o0 = x0 * cos - x1 * sin
86
- o1 = x0 * sin + x1 * cos
87
- tl.store(OUT, o0, mask=mask)
88
- tl.store(OUT + ROTARY_DIM_HALF * stride_out_headdim, o1, mask=mask)
89
- else:
90
- rk = tl.arange(0, BLOCK_K)
91
- X = X + (rh[:, None, None] * stride_x_nheads + rm[None, :, None] * stride_x_seqlen + rk[None, None, :] * stride_x_headdim)
92
- OUT = OUT + (rh[:, None, None] * stride_out_nheads + rm[None, :, None] * stride_out_seqlen + rk[None, None, :] * stride_out_headdim)
93
- mask = (rh[:, None, None] < nheads) & (rm[None, :, None] < seqlen) & (rk[None, None, :] < ROTARY_DIM)
94
- x = tl.load(X, mask=mask, other=0.0).to(tl.float32)
95
- x0, x1 = tl.split(tl.reshape(x, [BLOCK_H, BLOCK_M, BLOCK_K // 2, 2]))
96
- o0 = x0 * cos - x1 * sin
97
- o1 = x0 * sin + x1 * cos
98
- o = tl.reshape(tl.join(o0, o1), [BLOCK_H, BLOCK_M, BLOCK_K])
99
- tl.store(OUT, o, mask=mask)
100
-
101
-
102
- def apply_rotary(
103
- x: torch.Tensor,
104
- cos: torch.Tensor,
105
- sin: torch.Tensor,
106
- seqlen_offsets: Union[int, torch.Tensor] = 0,
107
- cu_seqlens: Optional[torch.Tensor] = None,
108
- max_seqlen: Optional[int] = None,
109
- interleaved=False,
110
- inplace=False,
111
- conjugate=False,
112
- ) -> torch.Tensor:
113
- """
114
- Arguments:
115
- x: (batch, seqlen, nheads, headdim) if cu_seqlens is None
116
- else (total_seqlen, nheads, headdim).
117
- cos: (seqlen_ro, rotary_dim / 2)
118
- sin: (seqlen_ro, rotary_dim / 2)
119
- seqlen_offsets: integer or integer tensor of size (batch,)
120
- cu_seqlens: (batch + 1,) or None
121
- max_seqlen: int
122
- Returns:
123
- y: (batch, seqlen, nheads, headdim)
124
- """
125
- is_varlen = cu_seqlens is not None
126
- if not is_varlen:
127
- batch, seqlen, nheads, headdim = x.shape
128
- else:
129
- assert max_seqlen is not None, "If cu_seqlens is passed in, then max_seqlen must be passed"
130
- total_seqlen, nheads, headdim = x.shape
131
- batch_p_1 = cu_seqlens.shape[0]
132
- batch = batch_p_1 - 1
133
- seqlen = max_seqlen
134
- seqlen_ro, rotary_dim = cos.shape
135
- assert sin.shape == cos.shape
136
- rotary_dim *= 2
137
- assert rotary_dim <= headdim, "rotary_dim must be <= headdim"
138
- assert headdim <= 256, "Only support headdim <= 256"
139
- assert seqlen_ro >= seqlen, "seqlen_ro must be >= seqlen"
140
-
141
- cos, sin = cos.contiguous(), sin.contiguous()
142
- if isinstance(seqlen_offsets, torch.Tensor):
143
- assert seqlen_offsets.shape == (batch,)
144
- assert seqlen_offsets.dtype in [torch.int32, torch.int64]
145
- seqlen_offsets = seqlen_offsets.contiguous()
146
- else:
147
- assert seqlen_offsets + seqlen <= seqlen_ro
148
-
149
- output = torch.empty_like(x) if not inplace else x
150
- if rotary_dim < headdim and not inplace:
151
- output[..., rotary_dim:].copy_(x[..., rotary_dim:])
152
-
153
- grid = lambda META: (triton.cdiv(nheads, META["BLOCK_H"]), triton.cdiv(seqlen, META["BLOCK_M"]), batch) # noqa
154
- BLOCK_M = 8 if rotary_dim <= 128 else 4
155
-
156
- # Need this, otherwise Triton tries to launch from cuda:0 and we get
157
- # ValueError: Pointer argument (at 0) cannot be accessed from Triton (cpu tensor?)
158
- with torch.cuda.device(x.device.index):
159
- torch.library.wrap_triton(rotary_kernel)[grid](
160
- output, # data ptrs
161
- x,
162
- cos,
163
- sin,
164
- cu_seqlens,
165
- seqlen_offsets,
166
- seqlen, # shapes
167
- nheads,
168
- seqlen_ro,
169
- output.stride(0) if not is_varlen else 0, # batch_strides if not varlen else 0
170
- output.stride(-3), # seqlen_stride or total_seqlen_stride
171
- output.stride(-2), # nheads_stride
172
- output.stride(-1), # headdim_stride
173
- x.stride(0) if not is_varlen else 0, # batch_strides if not varlen else 0
174
- x.stride(-3), # seqlen stride or total_seqlen_stride
175
- x.stride(-2), # nheads stride
176
- x.stride(-1), # headdim stride
177
- rotary_dim,
178
- isinstance(seqlen_offsets, torch.Tensor),
179
- is_varlen,
180
- interleaved,
181
- conjugate,
182
- BLOCK_M=BLOCK_M,
183
- BLOCK_H=2,
184
- )
185
- return output
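
For reference, `apply_rotary` above applies rotary position embeddings with the shapes documented in its docstring. A minimal sketch (illustrative only; assumes a CUDA device with Triton, and uses a standard inverse-frequency construction for `cos`/`sin`, which is not part of this file):

```python
import torch
from flash_attn.ops.triton.rotary import apply_rotary

batch, seqlen, nheads, headdim, rotary_dim = 2, 128, 16, 64, 64
pos = torch.arange(seqlen, device="cuda", dtype=torch.float32)
inv_freq = 1.0 / (10000.0 ** (torch.arange(0, rotary_dim, 2, device="cuda").float() / rotary_dim))
freqs = torch.outer(pos, inv_freq)          # (seqlen_ro, rotary_dim / 2)
cos, sin = freqs.cos(), freqs.sin()

x = torch.randn(batch, seqlen, nheads, headdim, device="cuda", dtype=torch.float16)
y = apply_rotary(x, cos, sin)                                 # same shape as x
y_step = apply_rotary(x[:, :1], cos, sin, seqlen_offsets=16)  # single query rotated as position 16
```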
 
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/__init__.py DELETED
@@ -1,393 +0,0 @@
1
- from typing import Optional, List
2
- import torch
3
- from ._ops import ops as flash_attn_ops
4
- from .flash_attn_interface import (
5
- flash_attn_func,
6
- flash_attn_kvpacked_func,
7
- flash_attn_qkvpacked_func,
8
- flash_attn_varlen_func,
9
- flash_attn_varlen_kvpacked_func,
10
- flash_attn_varlen_qkvpacked_func,
11
- flash_attn_with_kvcache,
12
- )
13
-
14
-
15
- def fwd(
16
- q: torch.Tensor,
17
- k: torch.Tensor,
18
- v: torch.Tensor,
19
- out: Optional[torch.Tensor] = None,
20
- alibi_slopes: Optional[torch.Tensor] = None,
21
- p_dropout: float = 0.0,
22
- softmax_scale: Optional[float] = None,
23
- is_causal: bool = False,
24
- window_size_left: int = -1,
25
- window_size_right: int = -1,
26
- softcap: float = 0.0,
27
- return_softmax: bool = False,
28
- gen: Optional[torch.Generator] = None,
29
- ) -> List[torch.Tensor]:
30
- """
31
- Forward pass for multi-head attention.
32
-
33
- Args:
34
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
35
- k: Key tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
36
- v: Value tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
37
- out: Optional output tensor, same shape as q
38
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
39
- p_dropout: Dropout probability
40
- softmax_scale: Scale factor for softmax
41
- is_causal: Whether to use causal attention
42
- window_size_left: Window size for left context (-1 for unlimited)
43
- window_size_right: Window size for right context (-1 for unlimited)
44
- softcap: Soft cap for attention weights
45
- return_softmax: Whether to return softmax weights
46
- gen: Optional random number generator
47
-
48
- Returns:
49
- List of tensors: [output, softmax_lse, (softmax if return_softmax)]
50
- """
51
- if softmax_scale is None:
52
- attention_head_dim = q.shape[-1]
53
- softmax_scale = 1.0 / (attention_head_dim**0.5)
54
-
55
- return flash_attn_ops.fwd(
56
- q,
57
- k,
58
- v,
59
- out,
60
- alibi_slopes,
61
- p_dropout,
62
- softmax_scale,
63
- is_causal,
64
- window_size_left,
65
- window_size_right,
66
- softcap,
67
- return_softmax,
68
- gen,
69
- )
70
-
71
-
72
- def varlen_fwd(
73
- q: torch.Tensor,
74
- k: torch.Tensor,
75
- v: torch.Tensor,
76
- cu_seqlens_q: torch.Tensor,
77
- cu_seqlens_k: torch.Tensor,
78
- out: Optional[torch.Tensor] = None,
79
- seqused_k: Optional[torch.Tensor] = None,
80
- leftpad_k: Optional[torch.Tensor] = None,
81
- block_table: Optional[torch.Tensor] = None,
82
- alibi_slopes: Optional[torch.Tensor] = None,
83
- max_seqlen_q: int = 0,
84
- max_seqlen_k: int = 0,
85
- p_dropout: float = 0.0,
86
- softmax_scale: Optional[float] = None,
87
- zero_tensors: bool = False,
88
- is_causal: bool = False,
89
- window_size_left: int = -1,
90
- window_size_right: int = -1,
91
- softcap: float = 0.0,
92
- return_softmax: bool = False,
93
- gen: Optional[torch.Generator] = None,
94
- ) -> List[torch.Tensor]:
95
- """
96
- Forward pass for multi-head attention with variable sequence lengths.
97
-
98
- Args:
99
- q: Query tensor of shape [total_q, num_heads, head_size]
100
- k: Key tensor of shape [total_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
101
- v: Value tensor of shape [total_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
102
- cu_seqlens_q: Cumulative sequence lengths for queries of shape [batch_size+1]
103
- cu_seqlens_k: Cumulative sequence lengths for keys of shape [batch_size+1]
104
- out: Optional output tensor of shape [total_q, num_heads, head_size]
105
- seqused_k: Optional tensor specifying how many keys to use per batch element [batch_size]
106
- leftpad_k: Optional left padding for keys of shape [batch_size]
107
- block_table: Optional block table of shape [batch_size, max_num_blocks_per_seq]
108
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
109
- max_seqlen_q: Maximum sequence length for queries
110
- max_seqlen_k: Maximum sequence length for keys
111
- p_dropout: Dropout probability
112
- softmax_scale: Scale factor for softmax
113
- zero_tensors: Whether to zero tensors before computation
114
- is_causal: Whether to use causal attention
115
- window_size_left: Window size for left context (-1 for unlimited)
116
- window_size_right: Window size for right context (-1 for unlimited)
117
- softcap: Soft cap for attention weights
118
- return_softmax: Whether to return softmax weights
119
- gen: Optional random number generator
120
-
121
- Returns:
122
- List of tensors: [output, softmax_lse, (softmax if return_softmax)]
123
- """
124
- if softmax_scale is None:
125
- attention_head_dim = q.shape[-1]
126
- softmax_scale = 1.0 / (attention_head_dim**0.5)
127
-
128
- return flash_attn_ops.varlen_fwd(
129
- q,
130
- k,
131
- v,
132
- out,
133
- cu_seqlens_q,
134
- cu_seqlens_k,
135
- seqused_k,
136
- leftpad_k,
137
- block_table,
138
- alibi_slopes,
139
- max_seqlen_q,
140
- max_seqlen_k,
141
- p_dropout,
142
- softmax_scale,
143
- zero_tensors,
144
- is_causal,
145
- window_size_left,
146
- window_size_right,
147
- softcap,
148
- return_softmax,
149
- gen,
150
- )
151
-
152
-
153
- def bwd(
154
- dout: torch.Tensor,
155
- q: torch.Tensor,
156
- k: torch.Tensor,
157
- v: torch.Tensor,
158
- out: torch.Tensor,
159
- softmax_lse: torch.Tensor,
160
- dq: Optional[torch.Tensor] = None,
161
- dk: Optional[torch.Tensor] = None,
162
- dv: Optional[torch.Tensor] = None,
163
- alibi_slopes: Optional[torch.Tensor] = None,
164
- p_dropout: float = 0.0,
165
- softmax_scale: Optional[float] = None,
166
- is_causal: bool = False,
167
- window_size_left: int = -1,
168
- window_size_right: int = -1,
169
- softcap: float = 0.0,
170
- deterministic: bool = False,
171
- gen: Optional[torch.Generator] = None,
172
- rng_state: Optional[torch.Tensor] = None,
173
- ) -> List[torch.Tensor]:
174
- """
175
- Backward pass for multi-head attention.
176
-
177
- Args:
178
- dout: Gradient tensor of shape [batch_size, seqlen_q, num_heads, head_size]
179
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
180
- k: Key tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
181
- v: Value tensor of shape [batch_size, seqlen_k, num_heads_k, head_size]
182
- out: Output tensor from forward pass of shape [batch_size, seqlen_q, num_heads, head_size]
183
- softmax_lse: Log-sum-exp values from forward pass of shape [batch_size, num_heads, seqlen_q]
184
- dq: Optional gradient tensor for queries, same shape as q
185
- dk: Optional gradient tensor for keys, same shape as k
186
- dv: Optional gradient tensor for values, same shape as v
187
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
188
- p_dropout: Dropout probability
189
- softmax_scale: Scale factor for softmax
190
- is_causal: Whether to use causal attention
191
- window_size_left: Window size for left context (-1 for unlimited)
192
- window_size_right: Window size for right context (-1 for unlimited)
193
- softcap: Soft cap for attention weights
194
- deterministic: Whether to use deterministic algorithms
195
- gen: Optional random number generator
196
- rng_state: Optional RNG state from forward pass
197
-
198
- Returns:
199
- List of tensors: [dq, dk, dv]
200
- """
201
- if softmax_scale is None:
202
- attention_head_dim = q.shape[-1]
203
- softmax_scale = 1.0 / (attention_head_dim**0.5)
204
-
205
- return flash_attn_ops.bwd(
206
- dout,
207
- q,
208
- k,
209
- v,
210
- out,
211
- softmax_lse,
212
- dq,
213
- dk,
214
- dv,
215
- alibi_slopes,
216
- p_dropout,
217
- softmax_scale,
218
- is_causal,
219
- window_size_left,
220
- window_size_right,
221
- softcap,
222
- deterministic,
223
- gen,
224
- rng_state,
225
- )
226
-
227
-
228
- def varlen_bwd(
229
- dout: torch.Tensor,
230
- q: torch.Tensor,
231
- k: torch.Tensor,
232
- v: torch.Tensor,
233
- out: torch.Tensor,
234
- softmax_lse: torch.Tensor,
235
- cu_seqlens_q: torch.Tensor,
236
- cu_seqlens_k: torch.Tensor,
237
- dq: Optional[torch.Tensor] = None,
238
- dk: Optional[torch.Tensor] = None,
239
- dv: Optional[torch.Tensor] = None,
240
- alibi_slopes: Optional[torch.Tensor] = None,
241
- max_seqlen_q: int = 0,
242
- max_seqlen_k: int = 0,
243
- p_dropout: float = 0.0,
244
- softmax_scale: Optional[float] = None,
245
- zero_tensors: bool = False,
246
- is_causal: bool = False,
247
- window_size_left: int = -1,
248
- window_size_right: int = -1,
249
- softcap: float = 0.0,
250
- deterministic: bool = False,
251
- gen: Optional[torch.Generator] = None,
252
- rng_state: Optional[torch.Tensor] = None,
253
- ) -> List[torch.Tensor]:
254
- """
255
- Backward pass for multi-head attention with variable sequence lengths.
256
-
257
- Args:
258
- dout: Gradient tensor of shape [total_q, num_heads, head_size]
259
- q: Query tensor of shape [total_q, num_heads, head_size]
260
- k: Key tensor of shape [total_k, num_heads_k, head_size]
261
- v: Value tensor of shape [total_k, num_heads_k, head_size]
262
- out: Output tensor from forward pass of shape [total_q, num_heads, head_size]
263
- softmax_lse: Log-sum-exp values from forward pass of shape [batch_size, num_heads, seqlen_q]
264
- cu_seqlens_q: Cumulative sequence lengths for queries of shape [batch_size+1]
265
- cu_seqlens_k: Cumulative sequence lengths for keys of shape [batch_size+1]
266
- dq: Optional gradient tensor for queries, same shape as q
267
- dk: Optional gradient tensor for keys, same shape as k
268
- dv: Optional gradient tensor for values, same shape as v
269
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
270
- max_seqlen_q: Maximum sequence length for queries
271
- max_seqlen_k: Maximum sequence length for keys
272
- p_dropout: Dropout probability
273
- softmax_scale: Scale factor for softmax
274
- zero_tensors: Whether to zero tensors before computation
275
- is_causal: Whether to use causal attention
276
- window_size_left: Window size for left context (-1 for unlimited)
277
- window_size_right: Window size for right context (-1 for unlimited)
278
- softcap: Soft cap for attention weights
279
- deterministic: Whether to use deterministic algorithms
280
- gen: Optional random number generator
281
- rng_state: Optional RNG state from forward pass
282
-
283
- Returns:
284
- List of tensors: [dq, dk, dv]
285
- """
286
- if softmax_scale is None:
287
- attention_head_dim = q.shape[-1]
288
- softmax_scale = 1.0 / (attention_head_dim**0.5)
289
-
290
- return flash_attn_ops.varlen_bwd(
291
- dout,
292
- q,
293
- k,
294
- v,
295
- out,
296
- softmax_lse,
297
- dq,
298
- dk,
299
- dv,
300
- cu_seqlens_q,
301
- cu_seqlens_k,
302
- alibi_slopes,
303
- max_seqlen_q,
304
- max_seqlen_k,
305
- p_dropout,
306
- softmax_scale,
307
- zero_tensors,
308
- is_causal,
309
- window_size_left,
310
- window_size_right,
311
- softcap,
312
- deterministic,
313
- gen,
314
- rng_state,
315
- )
316
-
317
-
318
- def fwd_kvcache(
319
- q: torch.Tensor,
320
- kcache: torch.Tensor,
321
- vcache: torch.Tensor,
322
- k: Optional[torch.Tensor] = None,
323
- v: Optional[torch.Tensor] = None,
324
- seqlens_k: Optional[torch.Tensor] = None,
325
- rotary_cos: Optional[torch.Tensor] = None,
326
- rotary_sin: Optional[torch.Tensor] = None,
327
- cache_batch_idx: Optional[torch.Tensor] = None,
328
- leftpad_k: Optional[torch.Tensor] = None,
329
- block_table: Optional[torch.Tensor] = None,
330
- alibi_slopes: Optional[torch.Tensor] = None,
331
- out: Optional[torch.Tensor] = None,
332
- softmax_scale: Optional[float] = None,
333
- is_causal: bool = False,
334
- window_size_left: int = -1,
335
- window_size_right: int = -1,
336
- softcap: float = 0.0,
337
- is_rotary_interleaved: bool = False,
338
- num_splits: int = 1,
339
- ) -> List[torch.Tensor]:
340
- """
341
- Forward pass for multi-head attention with KV cache.
342
-
343
- Args:
344
- q: Query tensor of shape [batch_size, seqlen_q, num_heads, head_size]
345
- kcache: Key cache tensor of shape [batch_size_c, seqlen_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
346
- vcache: Value cache tensor of shape [batch_size_c, seqlen_k, num_heads_k, head_size] or [num_blocks, page_block_size, num_heads_k, head_size]
347
- k: Optional new keys tensor of shape [batch_size, seqlen_knew, num_heads_k, head_size]
348
- v: Optional new values tensor of shape [batch_size, seqlen_knew, num_heads_k, head_size]
349
- seqlens_k: Optional sequence lengths for keys of shape [batch_size]
350
- rotary_cos: Optional rotary cosine tensor of shape [seqlen_ro, rotary_dim/2]
351
- rotary_sin: Optional rotary sine tensor of shape [seqlen_ro, rotary_dim/2]
352
- cache_batch_idx: Optional indices to index into the KV cache
353
- leftpad_k: Optional left padding for keys of shape [batch_size]
354
- block_table: Optional block table of shape [batch_size, max_num_blocks_per_seq]
355
- alibi_slopes: Optional ALiBi slopes tensor of shape [num_heads] or [batch_size, num_heads]
356
- out: Optional output tensor, same shape as q
357
- softmax_scale: Scale factor for softmax
358
- is_causal: Whether to use causal attention
359
- window_size_left: Window size for left context (-1 for unlimited)
360
- window_size_right: Window size for right context (-1 for unlimited)
361
- softcap: Soft cap for attention weights
362
- is_rotary_interleaved: Whether rotary embeddings are interleaved
363
- num_splits: Number of splits for computation
364
-
365
- Returns:
366
- List of tensors: [output, softmax_lse]
367
- """
368
- if softmax_scale is None:
369
- attention_head_dim = q.shape[-1]
370
- softmax_scale = 1.0 / (attention_head_dim**0.5)
371
-
372
- return flash_attn_ops.fwd_kvcache(
373
- q,
374
- kcache,
375
- vcache,
376
- k,
377
- v,
378
- seqlens_k,
379
- rotary_cos,
380
- rotary_sin,
381
- cache_batch_idx,
382
- leftpad_k,
383
- block_table,
384
- alibi_slopes,
385
- out,
386
- softmax_scale,
387
- is_causal,
388
- window_size_left,
389
- window_size_right,
390
- softcap,
391
- is_rotary_interleaved,
392
- num_splits,
393
- )
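
For reference, the wrappers in this `__init__.py` forward directly to the compiled ops and only fill in the default `softmax_scale`. A minimal call sketch for `fwd` (illustrative only; the import name depends on how this build is installed, and `flash_attn` is assumed here):

```python
import torch
import flash_attn  # assumed import name for this build

batch, seqlen, nheads, headdim = 2, 1024, 16, 64
q = torch.randn(batch, seqlen, nheads, headdim, device="cuda", dtype=torch.float16)
k, v = torch.randn_like(q), torch.randn_like(q)

# Causal attention; softmax_scale defaults to 1/sqrt(headdim) as in the wrapper above.
out, softmax_lse, *rest = flash_attn.fwd(q, k, v, is_causal=True)
```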
 
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/_flash_attn_876ac68_dirty.abi3.so DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:b2dde56a1e2cca8b30a68fc9da5f238b1d44d23f8e9a77ed70d3b8147166f739
3
- size 448643480
 
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/_ops.py DELETED
@@ -1,9 +0,0 @@
1
- import torch
2
- from . import _flash_attn_876ac68_dirty
3
- ops = torch.ops._flash_attn_876ac68_dirty
4
-
5
- def add_op_namespace_prefix(op_name: str):
6
- """
7
- Prefix op by namespace.
8
- """
9
- return f"_flash_attn_876ac68_dirty::{op_name}"
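
The helper above only builds the fully qualified op name for this build's namespace, e.g.:

```python
qualified = add_op_namespace_prefix("fwd")
assert qualified == "_flash_attn_876ac68_dirty::fwd"
# i.e. the "namespace::op" string expected by torch.library utilities.
```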
 
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/bert_padding.py DELETED
@@ -1,218 +0,0 @@
1
- # Adapted from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/padding.py
2
-
3
- import torch
4
- import torch.nn.functional as F
5
- from einops import rearrange, repeat
6
-
7
-
8
- class IndexFirstAxis(torch.autograd.Function):
9
- @staticmethod
10
- def forward(ctx, input, indices):
11
- ctx.save_for_backward(indices)
12
- assert input.ndim >= 2
13
- ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
14
- second_dim = other_shape.numel()
15
- # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
16
- # return input[indices]
17
- return torch.gather(
18
- rearrange(input, "b ... -> b (...)"), 0, repeat(indices, "z -> z d", d=second_dim)
19
- ).reshape(-1, *other_shape)
20
-
21
- @staticmethod
22
- def backward(ctx, grad_output):
23
- (indices,) = ctx.saved_tensors
24
- assert grad_output.ndim >= 2
25
- other_shape = grad_output.shape[1:]
26
- grad_output = rearrange(grad_output, "b ... -> b (...)")
27
- grad_input = torch.zeros(
28
- [ctx.first_axis_dim, grad_output.shape[1]],
29
- device=grad_output.device,
30
- dtype=grad_output.dtype,
31
- )
32
- # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
33
- # grad_input[indices] = grad_output
34
- grad_input.scatter_(0, repeat(indices, "z -> z d", d=grad_output.shape[1]), grad_output)
35
- return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
36
-
37
-
38
- index_first_axis = IndexFirstAxis.apply
39
-
40
-
41
- class IndexPutFirstAxis(torch.autograd.Function):
42
- @staticmethod
43
- def forward(ctx, values, indices, first_axis_dim):
44
- ctx.save_for_backward(indices)
45
- assert indices.ndim == 1
46
- assert values.ndim >= 2
47
- output = torch.zeros(
48
- first_axis_dim, *values.shape[1:], device=values.device, dtype=values.dtype
49
- )
50
- # TD [2022-03-04] For some reason torch.scatter is a bit faster than indexing.
51
- output[indices] = values
52
- # output.scatter_(0, repeat(indices, 'z -> z d', d=values.shape[1]), values)
53
- return output
54
-
55
- @staticmethod
56
- def backward(ctx, grad_output):
57
- (indices,) = ctx.saved_tensors
58
- # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
59
- grad_values = grad_output[indices]
60
- # grad_values = torch.gather(grad_output, 0, repeat(indices, 'z -> z d', d=grad_output.shape[1]))
61
- return grad_values, None, None
62
-
63
-
64
- index_put_first_axis = IndexPutFirstAxis.apply
65
-
66
-
67
- class IndexFirstAxisResidual(torch.autograd.Function):
68
- @staticmethod
69
- def forward(ctx, input, indices):
70
- ctx.save_for_backward(indices)
71
- assert input.ndim >= 2
72
- ctx.first_axis_dim, other_shape = input.shape[0], input.shape[1:]
73
- second_dim = other_shape.numel()
74
- # TD [2022-03-04] For some reason torch.gather is a bit faster than indexing.
75
- output = input[indices]
76
- # We don't want to reshape input (b ... -> b (...)) since it could change the channel_last
77
- # memory format to channel_first. In other words, input might not be contiguous.
78
- # If we don't detach, Pytorch complains about output being a view and is being modified inplace
79
- return output, input.detach()
80
-
81
- @staticmethod
82
- def backward(ctx, grad_output, grad_residual):
83
- (indices,) = ctx.saved_tensors
84
- assert grad_output.ndim >= 2
85
- other_shape = grad_output.shape[1:]
86
- assert grad_residual.shape[1:] == other_shape
87
- grad_input = grad_residual
88
- # grad_input[indices] += grad_output
89
- indices = indices.reshape(indices.shape[0], *((1,) * (grad_output.ndim - 1)))
90
- indices = indices.expand_as(grad_output)
91
- grad_input.scatter_add_(0, indices, grad_output)
92
- return grad_input.reshape(ctx.first_axis_dim, *other_shape), None
93
-
94
-
95
- index_first_axis_residual = IndexFirstAxisResidual.apply
96
-
97
-
98
- def unpad_input(hidden_states, attention_mask, unused_mask=None):
99
- """
100
- Arguments:
101
- hidden_states: (batch, seqlen, ...)
102
- attention_mask: (batch, seqlen), bool / int, 1 means valid and 0 means not valid.
103
- unused_mask: (batch, seqlen), bool / int, 1 means the element is allocated but unused.
104
- Return:
105
- hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask + unused_mask.
106
- indices: (total_nnz), the indices of masked tokens from the flattened input sequence.
107
- cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
108
- max_seqlen_in_batch: int
109
- seqused: (batch), returns the number of tokens selected in attention_mask + unused_mask.
110
- """
111
- all_masks = (attention_mask + unused_mask) if unused_mask is not None else attention_mask
112
- seqlens_in_batch = all_masks.sum(dim=-1, dtype=torch.int32)
113
- used_seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
114
- indices = torch.nonzero(all_masks.flatten(), as_tuple=False).flatten()
115
- max_seqlen_in_batch = seqlens_in_batch.max().item()
116
- cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
117
- # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
118
- # bool mask, then call nonzero to get the indices, then index with those. The indices tensor is @dim
119
- # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
120
- # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
121
- # so we write custom forward and backward to make it a bit faster.
122
- return (
123
- index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
124
- indices,
125
- cu_seqlens,
126
- max_seqlen_in_batch,
127
- used_seqlens_in_batch,
128
- )
129
-
130
-
131
- def unpad_input_for_concatenated_sequences(hidden_states, attention_mask_in_length):
132
- """
133
- Supports concatenating short samples in one sequence. The attention_mask_in_length is utilized to mask other short samples. It helps efficient training of variant lengths-based samples (e.g., the supervised fine-tuning task in large language model).
134
- The motivation for this function is explained [here](https://github.com/Dao-AILab/flash-attention/issues/432#issuecomment-1668822286).
135
-
136
- For example, if batch = 3 and seqlen = 6, the attention_mask_in_length is:
137
- ```
138
- [
139
- [2, 3, 0, 0, 0, 0],
140
- [3, 2, 0, 0, 0, 0],
141
- [6, 0, 0, 0, 0, 0]
142
- ]
143
- ```
144
- , which refers to the 3D-attention mask:
145
- ```
146
- [
147
- [
148
- [1, 0, 0, 0, 0, 0],
149
- [1, 1, 0, 0, 0, 0],
150
- [0, 0, 1, 0, 0, 0],
151
- [0, 0, 1, 1, 0, 0],
152
- [0, 0, 1, 1, 1, 0],
153
- [0, 0, 0, 0, 0, 1]
154
- ],
155
- [
156
- [1, 0, 0, 0, 0, 0],
157
- [1, 1, 0, 0, 0, 0],
158
- [1, 1, 1, 0, 0, 0],
159
- [0, 0, 0, 1, 0, 0],
160
- [0, 0, 0, 1, 1, 0],
161
- [0, 0, 0, 0, 0, 1]
162
- ],
163
- [
164
- [1, 0, 0, 0, 0, 0],
165
- [1, 1, 0, 0, 0, 0],
166
- [1, 1, 1, 0, 0, 0],
167
- [1, 1, 1, 1, 0, 0],
168
- [1, 1, 1, 1, 1, 0],
169
- [1, 1, 1, 1, 1, 1]
170
- ]
171
- ]
172
- ```.
173
-
174
- Arguments:
175
- hidden_states: (batch, seqlen, ...)
176
- attention_mask_in_length: (batch, seqlen), int, a nonzero number (e.g., 1, 2, 3, etc.) means length of concatenated sequence in b-th batch, and 0 means none.
177
- Return:
178
- hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
179
- indices: (total_nnz), the indices of non-masked tokens from the flattened input sequence.
180
- cu_seqlens: (batch + 1), the cumulative sequence lengths, used to index into hidden_states.
181
- max_seqlen_in_batch: int
182
- """
183
- length = attention_mask_in_length.sum(dim=-1)
184
- seqlen = attention_mask_in_length.size(-1)
185
- attention_mask_2d = torch.arange(seqlen, device=length.device, dtype=length.dtype).expand(len(length), seqlen) < length.unsqueeze(1)
186
- real_indices_idx = torch.nonzero(attention_mask_in_length.flatten(), as_tuple=False).flatten()
187
- seqlens_in_batch = attention_mask_in_length.flatten()[real_indices_idx]
188
- indices = torch.nonzero(attention_mask_2d.flatten(), as_tuple=False).flatten()
189
- max_seqlen_in_batch = seqlens_in_batch.max().item()
190
- cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
191
- # TD [2022-03-04] We don't want to index with a bool mask, because Pytorch will expand the
192
- # bool mask, then call nonzero to get the indices, then index with those. The indices tensor is @dim
193
- # times larger than it needs to be, wasting memory. It's faster and more memory-efficient to
194
- # index with integer indices. Moreover, torch's index is a bit slower than it needs to be,
195
- # so we write custom forward and backward to make it a bit faster.
196
- return (
197
- index_first_axis(rearrange(hidden_states, "b s ... -> (b s) ..."), indices),
198
- indices,
199
- cu_seqlens,
200
- max_seqlen_in_batch,
201
- )
202
-
203
-
204
- def pad_input(hidden_states, indices, batch, seqlen):
205
- """
206
- Arguments:
207
- hidden_states: (total_nnz, ...), where total_nnz = number of tokens selected in attention_mask.
208
- indices: (total_nnz), the indices that represent the non-masked tokens of the original padded input sequence.
209
- batch: int, batch size for the padded sequence.
210
- seqlen: int, maximum sequence length for the padded sequence.
211
- Return:
212
- hidden_states: (batch, seqlen, ...)
213
- """
214
- dim = hidden_states.shape[-1]
215
- # output = torch.zeros((batch * seqlen), dim, device=hidden_states.device, dtype=hidden_states.dtype)
216
- # output[indices] = hidden_states
217
- output = index_put_first_axis(hidden_states, indices, batch * seqlen)
218
- return rearrange(output, "(b s) ... -> b s ...", b=batch)
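
For reference, `unpad_input` and `pad_input` above are inverses over the masked positions. A minimal round trip (illustrative only; assumes the module is importable as `flash_attn.bert_padding` and that `einops` is installed; runs on CPU or GPU):

```python
import torch
from flash_attn.bert_padding import unpad_input, pad_input

batch, seqlen, dim = 3, 6, 8
hidden = torch.randn(batch, seqlen, dim)
lengths = torch.tensor([2, 5, 6])
mask = torch.arange(seqlen)[None, :] < lengths[:, None]   # (batch, seqlen) bool

packed, indices, cu_seqlens, max_len, seqused = unpad_input(hidden, mask)
# packed: (13, dim); cu_seqlens: tensor([0, 2, 7, 13], dtype=torch.int32); max_len: 6

repadded = pad_input(packed, indices, batch, seqlen)       # (batch, seqlen, dim), zeros where masked
assert torch.equal(repadded[mask], hidden[mask])
```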
 
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/flash_attn_interface.py DELETED
@@ -1,1609 +0,0 @@
1
- # Copyright (c) 2023, Tri Dao.
2
-
3
- from typing import Optional, Sequence, Tuple, Union
4
-
5
- import torch
6
- import torch.nn as nn
7
- import os
8
-
9
- # # isort: off
10
- # # We need to import the CUDA kernels after importing torch
11
- # USE_TRITON_ROCM = os.getenv("FLASH_ATTENTION_TRITON_AMD_ENABLE", "FALSE") == "TRUE"
12
- # if USE_TRITON_ROCM:
13
- # from .flash_attn_triton_amd import interface_fa as flash_attn_gpu
14
- # else:
15
- # import flash_attn_2_cuda as flash_attn_gpu
16
-
17
-
18
- from ._ops import ops as flash_attn_gpu
19
-
20
- # # isort: on
21
-
22
- def maybe_contiguous(x):
23
- return x.contiguous() if x is not None and x.stride(-1) != 1 else x
24
-
25
-
26
- def _get_block_size_n(device, head_dim, is_dropout, is_causal):
27
- # This should match the block sizes in the CUDA kernel
28
- assert head_dim <= 256
29
- major, minor = torch.cuda.get_device_capability(device)
30
- is_sm8x = major == 8 and minor > 0 # Only include sm86 and sm89, exclude sm80 (A100)
31
- is_sm80 = major == 8 and minor == 0
32
- is_sm90 = major == 9 and minor == 0
33
- if head_dim <= 32:
34
- return 128
35
- if head_dim <= 64:
36
- return 128 if not is_dropout else 64
37
- elif head_dim <= 96:
38
- return 64
39
- elif head_dim <= 128:
40
- if is_sm8x:
41
- return 64 if (not is_dropout and is_causal) else 32
42
- else:
43
- return 64 if not is_dropout else 32
44
- elif head_dim <= 192:
45
- return 64
46
- elif head_dim <= 224:
47
- return 64
48
- elif head_dim <= 256:
49
- return 64
50
-
51
-
52
- def round_multiple(x, m):
53
- return (x + m - 1) // m * m
54
-
55
-
56
- # torch.compile() support is only enabled for pytorch >= 2.4
57
- # The reason for this is that we are using the new custom_op and register_fake
58
- # APIs, which support inplace modification of inputs in the function itself
59
- if torch.__version__ >= "2.4.0":
60
- _torch_custom_op_wrapper = torch.library.custom_op
61
- _torch_register_fake_wrapper = torch.library.register_fake
62
- else:
63
- def noop_custom_op_wrapper(name, fn=None, /, *, mutates_args, device_types=None, schema=None):
64
- def wrap(func):
65
- return func
66
- if fn is None:
67
- return wrap
68
- return fn
69
- def noop_register_fake_wrapper(op, fn=None, /, *, lib=None, _stacklevel=1):
70
- def wrap(func):
71
- return func
72
- if fn is None:
73
- return wrap
74
- return fn
75
- _torch_custom_op_wrapper = noop_custom_op_wrapper
76
- _torch_register_fake_wrapper = noop_register_fake_wrapper
77
-
78
-
79
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_forward", mutates_args=(), device_types="cuda")
80
- def _flash_attn_forward(
81
- q: torch.Tensor,
82
- k: torch.Tensor,
83
- v: torch.Tensor,
84
- dropout_p: float,
85
- softmax_scale: float,
86
- causal: bool,
87
- window_size_left: int,
88
- window_size_right: int,
89
- softcap: float,
90
- alibi_slopes: Optional[torch.Tensor],
91
- return_softmax: bool
92
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
93
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
94
- out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.fwd(
95
- q,
96
- k,
97
- v,
98
- None,
99
- alibi_slopes,
100
- dropout_p,
101
- softmax_scale,
102
- causal,
103
- window_size_left,
104
- window_size_right,
105
- softcap,
106
- return_softmax,
107
- None,
108
- )
109
- return out, softmax_lse, S_dmask, rng_state
110
-
111
-
112
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_forward")
113
- def _flash_attn_forward_fake(
114
- q: torch.Tensor,
115
- k: torch.Tensor,
116
- v: torch.Tensor,
117
- dropout_p: float,
118
- softmax_scale: float,
119
- causal: bool,
120
- window_size_left: int,
121
- window_size_right: int,
122
- softcap: float,
123
- alibi_slopes: Optional[torch.Tensor],
124
- return_softmax: bool
125
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
126
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
127
- batch_size, seqlen_q, num_heads, head_size = q.shape
128
- seqlen_k = k.shape[1]
129
- out = torch.empty_like(q)
130
- softmax_lse = torch.empty((batch_size, num_heads, seqlen_q), dtype=torch.float32, device=q.device, layout=q.layout)
131
- p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
132
- if return_softmax:
133
- p = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128), round_multiple(seqlen_k, 128)), dtype=q.dtype, device=q.device, layout=q.layout)
134
- rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
135
-
136
- return out, softmax_lse, p, rng_state
137
-
138
-
139
- if torch.__version__ >= "2.4.0":
140
- _wrapped_flash_attn_forward = torch.ops.flash_attn._flash_attn_forward
141
- else:
142
- _wrapped_flash_attn_forward = _flash_attn_forward
143
-
144
-
145
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_forward", mutates_args=(), device_types="cuda")
146
- def _flash_attn_varlen_forward(
147
- q: torch.Tensor,
148
- k: torch.Tensor,
149
- v: torch.Tensor,
150
- cu_seqlens_q: torch.Tensor,
151
- cu_seqlens_k: torch.Tensor,
152
- max_seqlen_q: int,
153
- max_seqlen_k: int,
154
- dropout_p: float,
155
- softmax_scale: float,
156
- causal: bool,
157
- window_size_left: int = -1,
158
- window_size_right: int = -1,
159
- softcap: float = 0.0,
160
- alibi_slopes: Optional[torch.Tensor] = None,
161
- return_softmax: bool = False,
162
- block_table: Optional[torch.Tensor] = None,
163
- leftpad_k: Optional[torch.Tensor] = None,
164
- seqused_k: Optional[torch.Tensor] = None,
165
- zero_tensors: bool = False,
166
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
167
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
168
- out, softmax_lse, S_dmask, rng_state = flash_attn_gpu.varlen_fwd(
169
- q,
170
- k,
171
- v,
172
- None,
173
- cu_seqlens_q,
174
- cu_seqlens_k,
175
- seqused_k,
176
- leftpad_k,
177
- block_table,
178
- alibi_slopes,
179
- max_seqlen_q,
180
- max_seqlen_k,
181
- dropout_p,
182
- softmax_scale,
183
- zero_tensors,
184
- causal,
185
- window_size_left,
186
- window_size_right,
187
- softcap,
188
- return_softmax,
189
- None,
190
- )
191
- # if out.isnan().any() or softmax_lse.isnan().any():
192
- # breakpoint()
193
- return out, softmax_lse, S_dmask, rng_state
194
-
195
-
196
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_forward")
197
- def _flash_attn_varlen_forward_fake(
198
- q: torch.Tensor,
199
- k: torch.Tensor,
200
- v: torch.Tensor,
201
- cu_seqlens_q: torch.Tensor,
202
- cu_seqlens_k: torch.Tensor,
203
- max_seqlen_q: int,
204
- max_seqlen_k: int,
205
- dropout_p: float,
206
- softmax_scale: float,
207
- causal: bool,
208
- window_size_left: int = -1,
209
- window_size_right: int = -1,
210
- softcap: float = 0.0,
211
- alibi_slopes: Optional[torch.Tensor] = None,
212
- return_softmax: bool = False,
213
- block_table: Optional[torch.Tensor] = None,
214
- leftpad_k: Optional[torch.Tensor] = None,
215
- seqused_k: Optional[torch.Tensor] = None,
216
- zero_tensors: bool = False,
217
- ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
218
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
219
- paged_kv = block_table is not None
220
- batch_size = cu_seqlens_q.numel() - 1
221
- total_q, num_heads, _ = q.shape
222
-
223
- out = torch.empty_like(q)
224
- softmax_lse = torch.empty((num_heads, total_q), dtype=torch.float32, device=q.device, layout=q.layout)
225
- p = torch.empty((0,), dtype=q.dtype, device=q.device, layout=q.layout)
226
- seqlen_q_rounded = round_multiple(max_seqlen_q, 128)
227
- seqlen_k_rounded = round_multiple(max_seqlen_k, 128)
228
- if return_softmax:
229
- p = torch.empty((batch_size, num_heads, seqlen_q_rounded, seqlen_k_rounded), dtype=q.dtype, device=q.device, layout=q.layout)
230
- rng_state = torch.empty((2,), dtype=torch.int64, device=q.device)
231
- return out, softmax_lse, p, rng_state
232
-
233
-
234
- if torch.__version__ >= "2.4.0":
235
- _wrapped_flash_attn_varlen_forward = torch.ops.flash_attn._flash_attn_varlen_forward
236
- else:
237
- _wrapped_flash_attn_varlen_forward = _flash_attn_varlen_forward
238
-
239
-
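# Illustrative sketch (hypothetical example, not part of the deleted file): the varlen
# ops above take packed (total_tokens, nheads, headdim) tensors indexed by cumulative
# sequence lengths. A common way to build cu_seqlens from per-sequence lengths, per the
# docstrings below (dtype torch.int32, with a leading 0, on the same device as q/k/v):
import torch

seqlens = torch.tensor([5, 3, 7], dtype=torch.int32)   # three sequences in the batch
cu_seqlens = torch.nn.functional.pad(
    torch.cumsum(seqlens, dim=0, dtype=torch.int32), (1, 0)
)                                                        # tensor([0, 5, 8, 15], dtype=torch.int32)
max_seqlen = int(seqlens.max())                          # 7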
240
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
241
- def _flash_attn_backward(
242
- dout: torch.Tensor,
243
- q: torch.Tensor,
244
- k: torch.Tensor,
245
- v: torch.Tensor,
246
- out: torch.Tensor,
247
- softmax_lse: torch.Tensor,
248
- dq: Optional[torch.Tensor],
249
- dk: Optional[torch.Tensor],
250
- dv: Optional[torch.Tensor],
251
- dropout_p: float,
252
- softmax_scale: float,
253
- causal: bool,
254
- window_size_left: int,
255
- window_size_right: int,
256
- softcap: float,
257
- alibi_slopes: Optional[torch.Tensor],
258
- deterministic: bool,
259
- rng_state: Optional[torch.Tensor] = None,
260
- ) -> torch.Tensor:
261
- # dq, dk, dv are allocated by us so they should already be contiguous
262
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
263
- (
264
- dq,
265
- dk,
266
- dv,
267
- softmax_d,
268
- ) = flash_attn_gpu.bwd(
269
- dout,
270
- q,
271
- k,
272
- v,
273
- out,
274
- softmax_lse,
275
- dq,
276
- dk,
277
- dv,
278
- alibi_slopes,
279
- dropout_p,
280
- softmax_scale,
281
- causal,
282
- window_size_left,
283
- window_size_right,
284
- softcap,
285
- deterministic,
286
- None,
287
- rng_state,
288
- )
289
- return softmax_d
290
-
291
-
292
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_backward")
293
- def _flash_attn_backward_fake(
294
- dout: torch.Tensor,
295
- q: torch.Tensor,
296
- k: torch.Tensor,
297
- v: torch.Tensor,
298
- out: torch.Tensor,
299
- softmax_lse: torch.Tensor,
300
- dq: Optional[torch.Tensor],
301
- dk: Optional[torch.Tensor],
302
- dv: Optional[torch.Tensor],
303
- dropout_p: float,
304
- softmax_scale: float,
305
- causal: bool,
306
- window_size_left: int,
307
- window_size_right: int,
308
- softcap: float,
309
- alibi_slopes: Optional[torch.Tensor],
310
- deterministic: bool,
311
- rng_state: Optional[torch.Tensor] = None,
312
- ) -> torch.Tensor:
313
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
314
- if dq is None:
315
- dq = torch.empty_like(q)
316
- if dk is None:
317
- dk = torch.empty_like(k)
318
- if dv is None:
319
- dv = torch.empty_like(v)
320
- batch_size, seqlen_q, num_heads, _ = q.shape
321
- softmax_d = torch.empty((batch_size, num_heads, round_multiple(seqlen_q, 128)), device=q.device, dtype=torch.float32)
322
-
323
- return softmax_d
324
-
325
-
326
- if torch.__version__ >= "2.4.0":
327
- _wrapped_flash_attn_backward = torch.ops.flash_attn._flash_attn_backward
328
- else:
329
- _wrapped_flash_attn_backward = _flash_attn_backward
330
-
331
-
332
- @_torch_custom_op_wrapper("flash_attn::_flash_attn_varlen_backward", mutates_args=("dq", "dk", "dv"), device_types="cuda")
333
- def _flash_attn_varlen_backward(
334
- dout: torch.Tensor,
335
- q: torch.Tensor,
336
- k: torch.Tensor,
337
- v: torch.Tensor,
338
- out: torch.Tensor,
339
- softmax_lse: torch.Tensor,
340
- dq: Optional[torch.Tensor],
341
- dk: Optional[torch.Tensor],
342
- dv: Optional[torch.Tensor],
343
- cu_seqlens_q: torch.Tensor,
344
- cu_seqlens_k: torch.Tensor,
345
- max_seqlen_q: int,
346
- max_seqlen_k: int,
347
- dropout_p: float,
348
- softmax_scale: float,
349
- causal: bool,
350
- window_size_left: int,
351
- window_size_right: int,
352
- softcap: float,
353
- alibi_slopes: Optional[torch.Tensor],
354
- deterministic: bool,
355
- rng_state: Optional[torch.Tensor] = None,
356
- zero_tensors: bool = False,
357
- ) -> torch.Tensor:
358
- # dq, dk, dv are allocated by us so they should already be contiguous
359
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
360
- (
361
- dq,
362
- dk,
363
- dv,
364
- softmax_d,
365
- ) = flash_attn_gpu.varlen_bwd(
366
- dout,
367
- q,
368
- k,
369
- v,
370
- out,
371
- softmax_lse,
372
- dq,
373
- dk,
374
- dv,
375
- cu_seqlens_q,
376
- cu_seqlens_k,
377
- alibi_slopes,
378
- max_seqlen_q,
379
- max_seqlen_k,
380
- dropout_p,
381
- softmax_scale,
382
- zero_tensors,
383
- causal,
384
- window_size_left,
385
- window_size_right,
386
- softcap,
387
- deterministic,
388
- None,
389
- rng_state,
390
- )
391
- # if dk.isnan().any() or dk.isnan().any() or dv.isnan().any() or softmax_d.isnan().any():
392
- # breakpoint()
393
- return softmax_d
394
-
395
-
396
- @_torch_register_fake_wrapper("flash_attn::_flash_attn_varlen_backward")
397
- def _flash_attn_varlen_backward_fake(
398
- dout: torch.Tensor,
399
- q: torch.Tensor,
400
- k: torch.Tensor,
401
- v: torch.Tensor,
402
- out: torch.Tensor,
403
- softmax_lse: torch.Tensor,
404
- dq: Optional[torch.Tensor],
405
- dk: Optional[torch.Tensor],
406
- dv: Optional[torch.Tensor],
407
- cu_seqlens_q: torch.Tensor,
408
- cu_seqlens_k: torch.Tensor,
409
- max_seqlen_q: int,
410
- max_seqlen_k: int,
411
- dropout_p: float,
412
- softmax_scale: float,
413
- causal: bool,
414
- window_size_left: int,
415
- window_size_right: int,
416
- softcap: float,
417
- alibi_slopes: Optional[torch.Tensor],
418
- deterministic: bool,
419
- rng_state: Optional[torch.Tensor] = None,
420
- zero_tensors: bool = False,
421
- ) -> torch.Tensor:
422
- dout, q, k, v, out = [maybe_contiguous(x) for x in (dout, q, k, v, out)]
423
- batch_size = cu_seqlens_q.numel() - 1
424
- total_q, num_heads, _ = q.shape
425
-
426
- if dq is None:
427
- dq = torch.empty_like(q)
428
- if dk is None:
429
- dk = torch.empty_like(k)
430
- if dv is None:
431
- dv = torch.empty_like(v)
432
- softmax_d = torch.empty((num_heads, total_q + 128 * batch_size), device=q.device, dtype=torch.float32)
433
-
434
- return softmax_d
435
-
436
-
437
- if torch.__version__ >= "2.4.0":
438
- _wrapped_flash_attn_varlen_backward = torch.ops.flash_attn._flash_attn_varlen_backward
439
- else:
440
- _wrapped_flash_attn_varlen_backward = _flash_attn_varlen_backward
441
-
442
-
443
- class FlashAttnQKVPackedFunc(torch.autograd.Function):
444
- @staticmethod
445
- def forward(
446
- ctx,
447
- qkv,
448
- dropout_p,
449
- softmax_scale,
450
- causal,
451
- window_size,
452
- softcap,
453
- alibi_slopes,
454
- deterministic,
455
- return_softmax,
456
- is_grad_enabled,
457
- ):
458
- is_grad = is_grad_enabled and qkv.requires_grad
459
- if softmax_scale is None:
460
- softmax_scale = qkv.shape[-1] ** (-0.5)
461
- q, k, v = qkv[:, :, 0].detach(), qkv[:, :, 1].detach(), qkv[:, :, 2].detach()
462
- head_size_og = q.size(3)
463
- if head_size_og % 8 != 0:
464
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
465
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
466
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
467
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
468
- q,
469
- k,
470
- v,
471
- dropout_p,
472
- softmax_scale,
473
- causal=causal,
474
- window_size_left=window_size[0],
475
- window_size_right=window_size[1],
476
- softcap=softcap,
477
- alibi_slopes=alibi_slopes,
478
- return_softmax=return_softmax and dropout_p > 0,
479
- )
480
- if is_grad:
481
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
482
- ctx.dropout_p = dropout_p
483
- ctx.softmax_scale = softmax_scale
484
- ctx.causal = causal
485
- ctx.window_size = window_size
486
- ctx.softcap = softcap
487
- ctx.alibi_slopes = alibi_slopes
488
- ctx.deterministic = deterministic
489
- out = out_padded[..., :head_size_og]
490
- return out if not return_softmax else (out, softmax_lse, S_dmask)
491
-
492
- @staticmethod
493
- def backward(ctx, dout, *args):
494
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
495
- qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
496
- dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
497
- head_size_og = dout.size(3)
498
- dout_padded = dout
499
- if head_size_og % 8 != 0:
500
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
501
- _wrapped_flash_attn_backward(
502
- dout_padded,
503
- q,
504
- k,
505
- v,
506
- out,
507
- softmax_lse,
508
- dqkv[:, :, 0],
509
- dqkv[:, :, 1],
510
- dqkv[:, :, 2],
511
- ctx.dropout_p,
512
- ctx.softmax_scale,
513
- ctx.causal,
514
- ctx.window_size[0],
515
- ctx.window_size[1],
516
- ctx.softcap,
517
- ctx.alibi_slopes,
518
- ctx.deterministic,
519
- rng_state=rng_state,
520
- )
521
- dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
522
- return dqkv, None, None, None, None, None, None, None, None, None
523
-
524
-
525
- class FlashAttnVarlenQKVPackedFunc(torch.autograd.Function):
526
- @staticmethod
527
- def forward(
528
- ctx,
529
- qkv,
530
- cu_seqlens,
531
- max_seqlen,
532
- dropout_p,
533
- softmax_scale,
534
- causal,
535
- window_size,
536
- softcap,
537
- alibi_slopes,
538
- deterministic,
539
- return_softmax,
540
- is_grad_enabled,
541
- ):
542
- is_grad = is_grad_enabled and qkv.requires_grad
543
- if softmax_scale is None:
544
- softmax_scale = qkv.shape[-1] ** (-0.5)
545
- q, k, v = qkv[:, 0].detach(), qkv[:, 1].detach(), qkv[:, 2].detach()
546
- head_size_og = q.size(2)
547
- if head_size_og % 8 != 0:
548
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
549
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
550
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
551
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
552
- q,
553
- k,
554
- v,
555
- cu_seqlens,
556
- cu_seqlens,
557
- max_seqlen,
558
- max_seqlen,
559
- dropout_p,
560
- softmax_scale,
561
- causal=causal,
562
- window_size_left=window_size[0],
563
- window_size_right=window_size[1],
564
- softcap=softcap,
565
- alibi_slopes=alibi_slopes,
566
- return_softmax=return_softmax and dropout_p > 0,
567
- block_table=None,
568
- )
569
- if is_grad:
570
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, cu_seqlens, rng_state)
571
- ctx.dropout_p = dropout_p
572
- ctx.max_seqlen = max_seqlen
573
- ctx.softmax_scale = softmax_scale
574
- ctx.causal = causal
575
- ctx.window_size = window_size
576
- ctx.softcap = softcap
577
- ctx.alibi_slopes = alibi_slopes
578
- ctx.deterministic = deterministic
579
- out = out_padded[..., :head_size_og]
580
- return out if not return_softmax else (out, softmax_lse, S_dmask)
581
-
582
- @staticmethod
583
- def backward(ctx, dout, *args):
584
- q, k, v, out, softmax_lse, cu_seqlens, rng_state = ctx.saved_tensors
585
- qkv_shape = q.shape[:-2] + (3, *q.shape[-2:])
586
- dqkv = torch.empty(qkv_shape, dtype=q.dtype, device=q.device)
587
- head_size_og = dout.size(2)
588
- dout_padded = dout
589
- if head_size_og % 8 != 0:
590
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
591
- _wrapped_flash_attn_varlen_backward(
592
- dout_padded,
593
- q,
594
- k,
595
- v,
596
- out,
597
- softmax_lse,
598
- dqkv[:, 0],
599
- dqkv[:, 1],
600
- dqkv[:, 2],
601
- cu_seqlens,
602
- cu_seqlens,
603
- ctx.max_seqlen,
604
- ctx.max_seqlen,
605
- ctx.dropout_p,
606
- ctx.softmax_scale,
607
- ctx.causal,
608
- ctx.window_size[0],
609
- ctx.window_size[1],
610
- ctx.softcap,
611
- ctx.alibi_slopes,
612
- ctx.deterministic,
613
- rng_state=rng_state,
614
- )
615
- dqkv = dqkv[..., : dout.shape[-1]] # We could have padded the head dimension
616
- return dqkv, None, None, None, None, None, None, None, None, None, None, None
617
-
618
-
619
- class FlashAttnKVPackedFunc(torch.autograd.Function):
620
- @staticmethod
621
- def forward(
622
- ctx,
623
- q,
624
- kv,
625
- dropout_p,
626
- softmax_scale,
627
- causal,
628
- window_size,
629
- softcap,
630
- alibi_slopes,
631
- deterministic,
632
- return_softmax,
633
- is_grad_enabled,
634
- ):
635
- is_grad = is_grad_enabled and any(
636
- x.requires_grad for x in [q, kv]
637
- )
638
- if softmax_scale is None:
639
- softmax_scale = q.shape[-1] ** (-0.5)
640
- k, v = kv[:, :, 0].detach(), kv[:, :, 1].detach()
641
- head_size_og = q.size(3)
642
- if head_size_og % 8 != 0:
643
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
644
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
645
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
646
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
647
- q,
648
- k,
649
- v,
650
- dropout_p,
651
- softmax_scale,
652
- causal=causal,
653
- window_size_left=window_size[0],
654
- window_size_right=window_size[1],
655
- softcap=softcap,
656
- alibi_slopes=alibi_slopes,
657
- return_softmax=return_softmax and dropout_p > 0,
658
- )
659
- if is_grad:
660
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
661
- ctx.dropout_p = dropout_p
662
- ctx.softmax_scale = softmax_scale
663
- ctx.causal = causal
664
- ctx.window_size = window_size
665
- ctx.softcap = softcap
666
- ctx.alibi_slopes = alibi_slopes
667
- ctx.deterministic = deterministic
668
- out = out_padded[..., :head_size_og]
669
- return out if not return_softmax else (out, softmax_lse, S_dmask)
670
-
671
- @staticmethod
672
- def backward(ctx, dout, *args):
673
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
674
- dq = torch.empty_like(q)
675
- kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
676
- dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
677
- head_size_og = dout.size(3)
678
- dout_padded = dout
679
- if head_size_og % 8 != 0:
680
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
681
- _wrapped_flash_attn_backward(
682
- dout_padded,
683
- q,
684
- k,
685
- v,
686
- out,
687
- softmax_lse,
688
- dq,
689
- dkv[:, :, 0],
690
- dkv[:, :, 1],
691
- ctx.dropout_p,
692
- ctx.softmax_scale,
693
- ctx.causal,
694
- ctx.window_size[0],
695
- ctx.window_size[1],
696
- ctx.softcap,
697
- ctx.alibi_slopes,
698
- ctx.deterministic,
699
- rng_state=rng_state,
700
- )
701
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
702
- dkv = dkv[..., : dout.shape[-1]]
703
- return dq, dkv, None, None, None, None, None, None, None, None, None
704
-
705
-
706
- class FlashAttnVarlenKVPackedFunc(torch.autograd.Function):
707
- @staticmethod
708
- def forward(
709
- ctx,
710
- q,
711
- kv,
712
- cu_seqlens_q,
713
- cu_seqlens_k,
714
- max_seqlen_q,
715
- max_seqlen_k,
716
- dropout_p,
717
- softmax_scale,
718
- causal,
719
- window_size,
720
- softcap,
721
- alibi_slopes,
722
- deterministic,
723
- return_softmax,
724
- is_grad_enabled,
725
- ):
726
- is_grad = is_grad_enabled and any(
727
- x.requires_grad for x in [q, kv]
728
- )
729
- if softmax_scale is None:
730
- softmax_scale = q.shape[-1] ** (-0.5)
731
- k, v = kv[:, 0].detach(), kv[:, 1].detach()
732
- head_size_og = q.size(2)
733
- if head_size_og % 8 != 0:
734
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
735
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
736
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
737
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
738
- q,
739
- k,
740
- v,
741
- cu_seqlens_q,
742
- cu_seqlens_k,
743
- max_seqlen_q,
744
- max_seqlen_k,
745
- dropout_p,
746
- softmax_scale,
747
- causal=causal,
748
- window_size_left=window_size[0],
749
- window_size_right=window_size[1],
750
- softcap=softcap,
751
- alibi_slopes=alibi_slopes,
752
- return_softmax=return_softmax and dropout_p > 0,
753
- block_table=None,
754
- )
755
- if is_grad:
756
- ctx.save_for_backward(
757
- q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
758
- )
759
- ctx.dropout_p = dropout_p
760
- ctx.max_seqlen_q = max_seqlen_q
761
- ctx.max_seqlen_k = max_seqlen_k
762
- ctx.softmax_scale = softmax_scale
763
- ctx.causal = causal
764
- ctx.window_size = window_size
765
- ctx.softcap = softcap
766
- ctx.alibi_slopes = alibi_slopes
767
- ctx.deterministic = deterministic
768
- out = out_padded[..., :head_size_og]
769
- return out if not return_softmax else (out, softmax_lse, S_dmask)
770
-
771
- @staticmethod
772
- def backward(ctx, dout, *args):
773
- q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
774
- dq = torch.empty_like(q)
775
- kv_shape = k.shape[:-2] + (2, *k.shape[-2:])
776
- dkv = torch.empty(kv_shape, dtype=k.dtype, device=k.device)
777
- head_size_og = dout.size(2)
778
- dout_padded = dout
779
- if head_size_og % 8 != 0:
780
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
781
- _wrapped_flash_attn_varlen_backward(
782
- dout_padded,
783
- q,
784
- k,
785
- v,
786
- out,
787
- softmax_lse,
788
- dq,
789
- dkv[:, 0],
790
- dkv[:, 1],
791
- cu_seqlens_q,
792
- cu_seqlens_k,
793
- ctx.max_seqlen_q,
794
- ctx.max_seqlen_k,
795
- ctx.dropout_p,
796
- ctx.softmax_scale,
797
- ctx.causal,
798
- ctx.window_size[0],
799
- ctx.window_size[1],
800
- ctx.softcap,
801
- ctx.alibi_slopes,
802
- ctx.deterministic,
803
- rng_state=rng_state,
804
- )
805
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
806
- dkv = dkv[..., : dout.shape[-1]]
807
- return dq, dkv, None, None, None, None, None, None, None, None, None, None, None, None, None
808
-
809
-
810
- class FlashAttnFunc(torch.autograd.Function):
811
- @staticmethod
812
- def forward(
813
- ctx,
814
- q,
815
- k,
816
- v,
817
- dropout_p,
818
- softmax_scale,
819
- causal,
820
- window_size,
821
- softcap,
822
- alibi_slopes,
823
- deterministic,
824
- return_softmax,
825
- is_grad_enabled,
826
- ):
827
- is_grad = is_grad_enabled and any(
828
- x.requires_grad for x in [q, k, v]
829
- )
830
- if softmax_scale is None:
831
- softmax_scale = q.shape[-1] ** (-0.5)
832
- head_size_og = q.size(3)
833
- if head_size_og % 8 != 0:
834
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
835
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
836
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
837
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_forward(
838
- q,
839
- k,
840
- v,
841
- dropout_p,
842
- softmax_scale,
843
- causal=causal,
844
- window_size_left=window_size[0],
845
- window_size_right=window_size[1],
846
- softcap=softcap,
847
- alibi_slopes=alibi_slopes,
848
- return_softmax=return_softmax and dropout_p > 0,
849
- )
850
- if is_grad:
851
- ctx.save_for_backward(q, k, v, out_padded, softmax_lse, rng_state)
852
- ctx.dropout_p = dropout_p
853
- ctx.softmax_scale = softmax_scale
854
- ctx.causal = causal
855
- ctx.window_size = window_size
856
- ctx.softcap = softcap
857
- ctx.alibi_slopes = alibi_slopes
858
- ctx.deterministic = deterministic
859
- out = out_padded[..., :head_size_og]
860
- return out if not return_softmax else (out, softmax_lse, S_dmask)
861
-
862
- @staticmethod
863
- def backward(ctx, dout, *args):
864
- q, k, v, out, softmax_lse, rng_state = ctx.saved_tensors
865
- dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
866
- head_size_og = dout.size(3)
867
- dout_padded = dout
868
- if head_size_og % 8 != 0:
869
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
870
- _wrapped_flash_attn_backward(
871
- dout_padded,
872
- q,
873
- k,
874
- v,
875
- out,
876
- softmax_lse,
877
- dq,
878
- dk,
879
- dv,
880
- ctx.dropout_p,
881
- ctx.softmax_scale,
882
- ctx.causal,
883
- ctx.window_size[0],
884
- ctx.window_size[1],
885
- ctx.softcap,
886
- ctx.alibi_slopes,
887
- ctx.deterministic,
888
- rng_state=rng_state,
889
- )
890
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
891
- dk = dk[..., : dout.shape[-1]]
892
- dv = dv[..., : dout.shape[-1]]
893
- return dq, dk, dv, None, None, None, None, None, None, None, None, None
894
-
895
-
896
- class FlashAttnVarlenFunc(torch.autograd.Function):
897
- @staticmethod
898
- def forward(
899
- ctx,
900
- q,
901
- k,
902
- v,
903
- cu_seqlens_q,
904
- cu_seqlens_k,
905
- max_seqlen_q,
906
- max_seqlen_k,
907
- dropout_p,
908
- softmax_scale,
909
- causal,
910
- window_size,
911
- softcap,
912
- alibi_slopes,
913
- deterministic,
914
- return_softmax,
915
- block_table,
916
- is_grad_enabled,
917
- ):
918
- is_grad = is_grad_enabled and any(
919
- x.requires_grad for x in [q, k, v]
920
- )
921
- if softmax_scale is None:
922
- softmax_scale = q.shape[-1] ** (-0.5)
923
- head_size_og = q.size(2)
924
- if head_size_og % 8 != 0:
925
- q = torch.nn.functional.pad(q, [0, 8 - head_size_og % 8])
926
- k = torch.nn.functional.pad(k, [0, 8 - head_size_og % 8])
927
- v = torch.nn.functional.pad(v, [0, 8 - head_size_og % 8])
928
- out_padded, softmax_lse, S_dmask, rng_state = _wrapped_flash_attn_varlen_forward(
929
- q,
930
- k,
931
- v,
932
- cu_seqlens_q,
933
- cu_seqlens_k,
934
- max_seqlen_q,
935
- max_seqlen_k,
936
- dropout_p,
937
- softmax_scale,
938
- causal=causal,
939
- window_size_left=window_size[0],
940
- window_size_right=window_size[1],
941
- softcap=softcap,
942
- alibi_slopes=alibi_slopes,
943
- return_softmax=return_softmax and dropout_p > 0,
944
- block_table=block_table,
945
- )
946
- if is_grad:
947
- ctx.save_for_backward(
948
- q, k, v, out_padded, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state
949
- )
950
- ctx.dropout_p = dropout_p
951
- ctx.max_seqlen_q = max_seqlen_q
952
- ctx.max_seqlen_k = max_seqlen_k
953
- ctx.softmax_scale = softmax_scale
954
- ctx.causal = causal
955
- ctx.window_size = window_size
956
- ctx.softcap = softcap
957
- ctx.alibi_slopes = alibi_slopes
958
- ctx.deterministic = deterministic
959
-
960
- out = out_padded[..., :head_size_og]
961
- return out if not return_softmax else (out, softmax_lse, S_dmask)
962
-
963
- @staticmethod
964
- def backward(ctx, dout, *args):
965
- q, k, v, out, softmax_lse, cu_seqlens_q, cu_seqlens_k, rng_state = ctx.saved_tensors
966
- dq, dk, dv = torch.empty_like(q), torch.empty_like(k), torch.empty_like(v)
967
- head_size_og = dout.size(2)
968
- dout_padded = dout
969
- if head_size_og % 8 != 0:
970
- dout_padded = torch.nn.functional.pad(dout, [0, 8 - head_size_og % 8])
971
- _wrapped_flash_attn_varlen_backward(
972
- dout_padded,
973
- q,
974
- k,
975
- v,
976
- out,
977
- softmax_lse,
978
- dq,
979
- dk,
980
- dv,
981
- cu_seqlens_q,
982
- cu_seqlens_k,
983
- ctx.max_seqlen_q,
984
- ctx.max_seqlen_k,
985
- ctx.dropout_p,
986
- ctx.softmax_scale,
987
- ctx.causal,
988
- ctx.window_size[0],
989
- ctx.window_size[1],
990
- ctx.softcap,
991
- ctx.alibi_slopes,
992
- ctx.deterministic,
993
- rng_state=rng_state,
994
- )
995
- dq = dq[..., : dout.shape[-1]] # We could have padded the head dimension
996
- dk = dk[..., : dout.shape[-1]]
997
- dv = dv[..., : dout.shape[-1]]
998
- return dq, dk, dv, None, None, None, None, None, None, None, None, None, None, None, None, None, None
999
-
1000
-
1001
- def flash_attn_qkvpacked_func(
1002
- qkv,
1003
- dropout_p=0.0,
1004
- softmax_scale=None,
1005
- causal=False,
1006
- window_size=(-1, -1), # -1 means infinite context window
1007
- softcap=0.0, # <=0.0 means deactivate
1008
- alibi_slopes=None,
1009
- deterministic=False,
1010
- return_attn_probs=False,
1011
- ):
1012
- """dropout_p should be set to 0.0 during evaluation
1013
- If Q, K, V are already stacked into 1 tensor, this function will be faster than
1014
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
1015
- of the gradients of Q, K, V.
1016
- For multi-query and grouped-query attention (MQA/GQA), please see
1017
- flash_attn_kvpacked_func and flash_attn_func.
1018
-
1019
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1020
- will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
1021
-
1022
- Arguments:
1023
- qkv: (batch_size, seqlen, 3, nheads, headdim)
1024
- dropout_p: float. Dropout probability.
1025
- softmax_scale: float. The scaling of QK^T before applying softmax.
1026
- Default to 1 / sqrt(headdim).
1027
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1028
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1029
- softcap: float. Anything > 0 activates softcapping attention.
1030
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|) is added to
1031
- the attention score of query i and key j.
1032
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1033
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1034
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1035
- testing only. The returned probabilities are not guaranteed to be correct
1036
- (they might not have the right scaling).
1037
- Return:
1038
- out: (batch_size, seqlen, nheads, headdim).
1039
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
1040
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1041
- normalization factor).
1042
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1043
- The output of softmax (possibly with different scaling). It also encodes the dropout
1044
- pattern (negative means that location was dropped, nonnegative means it was kept).
1045
- """
1046
- return FlashAttnQKVPackedFunc.apply(
1047
- qkv,
1048
- dropout_p,
1049
- softmax_scale,
1050
- causal,
1051
- window_size,
1052
- softcap,
1053
- alibi_slopes,
1054
- deterministic,
1055
- return_attn_probs,
1056
- torch.is_grad_enabled(),
1057
- )
1058
-
1059
-
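# Illustrative usage sketch (hypothetical example, not part of the deleted file).
# Assumes a CUDA device, this kernel build installed, and fp16/bf16 inputs; shapes
# follow the docstring above.
import torch

qkv = torch.randn(2, 1024, 3, 8, 64, dtype=torch.float16, device="cuda")  # (batch, seqlen, 3, nheads, headdim)
out = flash_attn_qkvpacked_func(qkv, dropout_p=0.0, causal=True)          # -> (2, 1024, 8, 64)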
1060
- def flash_attn_kvpacked_func(
1061
- q,
1062
- kv,
1063
- dropout_p=0.0,
1064
- softmax_scale=None,
1065
- causal=False,
1066
- window_size=(-1, -1), # -1 means infinite context window
1067
- softcap=0.0, # 0.0 means deactivated
1068
- alibi_slopes=None,
1069
- deterministic=False,
1070
- return_attn_probs=False,
1071
- ):
1072
- """dropout_p should be set to 0.0 during evaluation
1073
- If K, V are already stacked into 1 tensor, this function will be faster than
1074
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
1075
- of the gradients of K, V.
1076
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1077
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1078
- For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
1079
- 0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
1080
-
1081
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1082
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1083
- 1 1 1 1 0
1084
- 1 1 1 1 1
1085
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1086
- 0 0
1087
- 0 0
1088
- 0 0
1089
- 1 0
1090
- 1 1
1091
- If the row of the mask is all zero, the output will be zero.
1092
-
1093
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1094
- will only attend to keys between
1095
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1096
-
1097
- Arguments:
1098
- q: (batch_size, seqlen, nheads, headdim)
1099
- kv: (batch_size, seqlen, 2, nheads_k, headdim)
1100
- dropout_p: float. Dropout probability.
1101
- softmax_scale: float. The scaling of QK^T before applying softmax.
1102
- Default to 1 / sqrt(headdim).
1103
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1104
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1105
- softcap: float. Anything > 0 activates softcapping attention.
1106
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1107
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1108
- is added to the attention score of query i and key j.
1109
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1110
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1111
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1112
- testing only. The returned probabilities are not guaranteed to be correct
1113
- (they might not have the right scaling).
1114
- Return:
1115
- out: (batch_size, seqlen, nheads, headdim).
1116
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
1117
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1118
- normalization factor).
1119
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1120
- The output of softmax (possibly with different scaling). It also encodes the dropout
1121
- pattern (negative means that location was dropped, nonnegative means it was kept).
1122
- """
1123
- return FlashAttnKVPackedFunc.apply(
1124
- q,
1125
- kv,
1126
- dropout_p,
1127
- softmax_scale,
1128
- causal,
1129
- window_size,
1130
- softcap,
1131
- alibi_slopes,
1132
- deterministic,
1133
- return_attn_probs,
1134
- torch.is_grad_enabled(),
1135
- )
1136
-
1137
-
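# Illustrative usage sketch (hypothetical example, not part of the deleted file),
# showing the MQA/GQA case described in the docstring: 8 query heads sharing 2 KV heads.
# Assumes a CUDA device and fp16/bf16 inputs.
import torch

q = torch.randn(2, 1024, 8, 64, dtype=torch.float16, device="cuda")      # (batch, seqlen, nheads, headdim)
kv = torch.randn(2, 1024, 2, 2, 64, dtype=torch.float16, device="cuda")  # (batch, seqlen, 2, nheads_k, headdim)
out = flash_attn_kvpacked_func(q, kv, causal=True)                       # -> (2, 1024, 8, 64)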
1138
- def flash_attn_func(
1139
- q,
1140
- k,
1141
- v,
1142
- dropout_p=0.0,
1143
- softmax_scale=None,
1144
- causal=False,
1145
- window_size=(-1, -1), # -1 means infinite context window
1146
- softcap=0.0, # 0.0 means deactivated
1147
- alibi_slopes=None,
1148
- deterministic=False,
1149
- return_attn_probs=False,
1150
- ):
1151
- """dropout_p should be set to 0.0 during evaluation
1152
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1153
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1154
- For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
1155
- 0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
1156
-
1157
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1158
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1159
- 1 1 1 1 0
1160
- 1 1 1 1 1
1161
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1162
- 0 0
1163
- 0 0
1164
- 0 0
1165
- 1 0
1166
- 1 1
1167
- If the row of the mask is all zero, the output will be zero.
1168
-
1169
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1170
- will only attend to keys between
1171
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1172
-
1173
- Arguments:
1174
- q: (batch_size, seqlen, nheads, headdim)
1175
- k: (batch_size, seqlen, nheads_k, headdim)
1176
- v: (batch_size, seqlen, nheads_k, headdim)
1177
- dropout_p: float. Dropout probability.
1178
- softmax_scale: float. The scaling of QK^T before applying softmax.
1179
- Default to 1 / sqrt(headdim).
1180
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1181
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
- softcap: float. Anything > 0 activates softcapping attention.
1182
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1183
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1184
- is added to the attention score of query i and key j.
1185
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1186
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1187
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1188
- testing only. The returned probabilities are not guaranteed to be correct
1189
- (they might not have the right scaling).
1190
- Return:
1191
- out: (batch_size, seqlen, nheads, headdim).
1192
- softmax_lse [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen). The
1193
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1194
- normalization factor).
1195
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1196
- The output of softmax (possibly with different scaling). It also encodes the dropout
1197
- pattern (negative means that location was dropped, nonnegative means it was kept).
1198
- """
1199
- return FlashAttnFunc.apply(
1200
- q,
1201
- k,
1202
- v,
1203
- dropout_p,
1204
- softmax_scale,
1205
- causal,
1206
- window_size,
1207
- softcap,
1208
- alibi_slopes,
1209
- deterministic,
1210
- return_attn_probs,
1211
- torch.is_grad_enabled(),
1212
- )
1213
-
1214
-
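# Illustrative usage sketch (hypothetical example, not part of the deleted file).
# Assumes a CUDA device and fp16/bf16 inputs; window_size=(256, 0) combined with
# causal=True gives causal sliding-window attention over the previous 256 keys.
import torch

q = torch.randn(2, 2048, 8, 64, dtype=torch.float16, device="cuda")
k = torch.randn(2, 2048, 8, 64, dtype=torch.float16, device="cuda")
v = torch.randn(2, 2048, 8, 64, dtype=torch.float16, device="cuda")
out = flash_attn_func(q, k, v, causal=True, window_size=(256, 0))  # -> (2, 2048, 8, 64)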
1215
- def flash_attn_varlen_qkvpacked_func(
1216
- qkv,
1217
- cu_seqlens,
1218
- max_seqlen,
1219
- dropout_p=0.0,
1220
- softmax_scale=None,
1221
- causal=False,
1222
- window_size=(-1, -1), # -1 means infinite context window
1223
- softcap=0.0, # 0.0 means deactivated
1224
- alibi_slopes=None,
1225
- deterministic=False,
1226
- return_attn_probs=False,
1227
- ):
1228
- """dropout_p should be set to 0.0 during evaluation
1229
- If Q, K, V are already stacked into 1 tensor, this function will be faster than
1230
- calling flash_attn_varlen_func on Q, K, V since the backward pass avoids explicit concatenation
1231
- of the gradients of Q, K, V.
1232
- For multi-query and grouped-query attention (MQA/GQA), please see
1233
- flash_attn_varlen_kvpacked_func and flash_attn_varlen_func.
1234
-
1235
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1236
- will only attend to keys between [i - window_size[0], i + window_size[1]] inclusive.
1237
-
1238
- Arguments:
1239
- qkv: (total, 3, nheads, headdim), where total = total number of tokens in the batch.
1240
- cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1241
- of the sequences in the batch, used to index into qkv.
1242
- max_seqlen: int. Maximum sequence length in the batch.
1243
- dropout_p: float. Dropout probability.
1244
- softmax_scale: float. The scaling of QK^T before applying softmax.
1245
- Default to 1 / sqrt(headdim).
1246
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1247
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1248
- softcap: float. Anything > 0 activates softcapping attention.
1249
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of (-alibi_slope * |i - j|)
1250
- is added to the attention score of query i and key j.
1251
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1252
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1253
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1254
- testing only. The returned probabilities are not guaranteed to be correct
1255
- (they might not have the right scaling).
1256
- Return:
1257
- out: (total, nheads, headdim).
1258
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
1259
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1260
- normalization factor).
1261
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1262
- The output of softmax (possibly with different scaling). It also encodes the dropout
1263
- pattern (negative means that location was dropped, nonnegative means it was kept).
1264
- """
1265
- return FlashAttnVarlenQKVPackedFunc.apply(
1266
- qkv,
1267
- cu_seqlens,
1268
- max_seqlen,
1269
- dropout_p,
1270
- softmax_scale,
1271
- causal,
1272
- window_size,
1273
- softcap,
1274
- alibi_slopes,
1275
- deterministic,
1276
- return_attn_probs,
1277
- torch.is_grad_enabled(),
1278
- )
1279
-
1280
-
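# Illustrative usage sketch (hypothetical example, not part of the deleted file).
# Two sequences of lengths 5 and 3 packed into one (total=8, 3, nheads, headdim) tensor,
# indexed by cu_seqlens as described in the docstring. Assumes a CUDA device and
# fp16/bf16 inputs.
import torch

qkv = torch.randn(8, 3, 4, 64, dtype=torch.float16, device="cuda")
cu_seqlens = torch.tensor([0, 5, 8], dtype=torch.int32, device="cuda")
out = flash_attn_varlen_qkvpacked_func(qkv, cu_seqlens, max_seqlen=5, causal=True)  # -> (8, 4, 64)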
1281
- def flash_attn_varlen_kvpacked_func(
1282
- q,
1283
- kv,
1284
- cu_seqlens_q,
1285
- cu_seqlens_k,
1286
- max_seqlen_q,
1287
- max_seqlen_k,
1288
- dropout_p=0.0,
1289
- softmax_scale=None,
1290
- causal=False,
1291
- window_size=(-1, -1), # -1 means infinite context window
1292
- softcap=0.0, # 0.0 means deactivated
1293
- alibi_slopes=None,
1294
- deterministic=False,
1295
- return_attn_probs=False,
1296
- ):
1297
- """dropout_p should be set to 0.0 during evaluation
1298
- If K, V are already stacked into 1 tensor, this function will be faster than
1299
- calling flash_attn_func on Q, K, V since the backward pass avoids explicit concatenation
1300
- of the gradients of K, V.
1301
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1302
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1303
- For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
1304
- 0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
1305
-
1306
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1307
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1308
- 1 1 1 1 0
1309
- 1 1 1 1 1
1310
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1311
- 0 0
1312
- 0 0
1313
- 0 0
1314
- 1 0
1315
- 1 1
1316
- If the row of the mask is all zero, the output will be zero.
1317
-
1318
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1319
- will only attend to keys between
1320
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1321
-
1322
- Arguments:
1323
- q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
1324
- kv: (total_k, 2, nheads_k, headdim), where total_k = total number of key tokens in the batch.
1325
- cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1326
- of the sequences in the batch, used to index into q.
1327
- cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1328
- of the sequences in the batch, used to index into kv.
1329
- max_seqlen_q: int. Maximum query sequence length in the batch.
1330
- max_seqlen_k: int. Maximum key sequence length in the batch.
1331
- dropout_p: float. Dropout probability.
1332
- softmax_scale: float. The scaling of QK^T before applying softmax.
1333
- Default to 1 / sqrt(headdim).
1334
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1335
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1336
- softcap: float. Anything > 0 activates softcapping attention.
1337
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1338
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1339
- is added to the attention score of query i and key j.
1340
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1341
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1342
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1343
- testing only. The returned probabilities are not guaranteed to be correct
1344
- (they might not have the right scaling).
1345
- Return:
1346
- out: (total, nheads, headdim).
1347
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
1348
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1349
- normalization factor).
1350
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1351
- The output of softmax (possibly with different scaling). It also encodes the dropout
1352
- pattern (negative means that location was dropped, nonnegative means it was kept).
1353
- """
1354
- return FlashAttnVarlenKVPackedFunc.apply(
1355
- q,
1356
- kv,
1357
- cu_seqlens_q,
1358
- cu_seqlens_k,
1359
- max_seqlen_q,
1360
- max_seqlen_k,
1361
- dropout_p,
1362
- softmax_scale,
1363
- causal,
1364
- window_size,
1365
- softcap,
1366
- alibi_slopes,
1367
- deterministic,
1368
- return_attn_probs,
1369
- torch.is_grad_enabled(),
1370
- )
1371
-
1372
-
1373
- def flash_attn_varlen_func(
1374
- q,
1375
- k,
1376
- v,
1377
- cu_seqlens_q,
1378
- cu_seqlens_k,
1379
- max_seqlen_q,
1380
- max_seqlen_k,
1381
- dropout_p=0.0,
1382
- softmax_scale=None,
1383
- causal=False,
1384
- window_size=(-1, -1), # -1 means infinite context window
1385
- softcap=0.0, # 0.0 means deactivated
1386
- alibi_slopes=None,
1387
- deterministic=False,
1388
- return_attn_probs=False,
1389
- block_table=None,
1390
- ):
1391
- """dropout_p should be set to 0.0 during evaluation
1392
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in K, V with fewer heads
1393
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1394
- For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
1395
- 0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
1396
-
1397
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1398
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1399
- 1 1 1 1 0
1400
- 1 1 1 1 1
1401
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1402
- 0 0
1403
- 0 0
1404
- 0 0
1405
- 1 0
1406
- 1 1
1407
- If the row of the mask is all zero, the output will be zero.
1408
-
1409
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1410
- will only attend to keys between
1411
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1412
-
1413
- Arguments:
1414
- q: (total_q, nheads, headdim), where total_q = total number of query tokens in the batch.
1415
- k: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
1416
- v: (total_k, nheads_k, headdim), where total_k = total number of key tokens in the batch.
1417
- cu_seqlens_q: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1418
- of the sequences in the batch, used to index into q.
1419
- cu_seqlens_k: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
1420
- of the sequences in the batch, used to index into kv.
1421
- max_seqlen_q: int. Maximum query sequence length in the batch.
1422
- max_seqlen_k: int. Maximum key sequence length in the batch.
1423
- dropout_p: float. Dropout probability.
1424
- softmax_scale: float. The scaling of QK^T before applying softmax.
1425
- Default to 1 / sqrt(headdim).
1426
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1427
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1428
- softcap: float. Anything > 0 activates softcapping attention.
1429
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1430
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1431
- is added to the attention score of query i and key j.
1432
- deterministic: bool. Whether to use the deterministic implementation of the backward pass,
1433
- which is slightly slower and uses more memory. The forward pass is always deterministic.
1434
- return_attn_probs: bool. Whether to return the attention probabilities. This option is for
1435
- testing only. The returned probabilities are not guaranteed to be correct
1436
- (they might not have the right scaling).
1437
- Return:
1438
- out: (total, nheads, headdim).
1439
- softmax_lse [optional, if return_attn_probs=True]: (nheads, total_q_seqlen). The
1440
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1441
- normalization factor).
1442
- S_dmask [optional, if return_attn_probs=True]: (batch_size, nheads, seqlen, seqlen).
1443
- The output of softmax (possibly with different scaling). It also encodes the dropout
1444
- pattern (negative means that location was dropped, nonnegative means it was kept).
1445
- """
1446
- return FlashAttnVarlenFunc.apply(
1447
- q,
1448
- k,
1449
- v,
1450
- cu_seqlens_q,
1451
- cu_seqlens_k,
1452
- max_seqlen_q,
1453
- max_seqlen_k,
1454
- dropout_p,
1455
- softmax_scale,
1456
- causal,
1457
- window_size,
1458
- softcap,
1459
- alibi_slopes,
1460
- deterministic,
1461
- return_attn_probs,
1462
- block_table,
1463
- torch.is_grad_enabled(),
1464
- )
1465
-
1466
-
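# Illustrative usage sketch (hypothetical example, not part of the deleted file).
# Unpadded Q/K/V with different query and key lengths per sequence. Assumes a CUDA
# device and fp16/bf16 inputs.
import torch

q = torch.randn(6, 8, 64, dtype=torch.float16, device="cuda")   # total_q = 4 + 2
k = torch.randn(9, 8, 64, dtype=torch.float16, device="cuda")   # total_k = 6 + 3
v = torch.randn(9, 8, 64, dtype=torch.float16, device="cuda")
cu_seqlens_q = torch.tensor([0, 4, 6], dtype=torch.int32, device="cuda")
cu_seqlens_k = torch.tensor([0, 6, 9], dtype=torch.int32, device="cuda")
out = flash_attn_varlen_func(
    q, k, v, cu_seqlens_q, cu_seqlens_k, max_seqlen_q=4, max_seqlen_k=6, causal=True
)                                                                 # -> (6, 8, 64)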
1467
- def flash_attn_with_kvcache(
1468
- q,
1469
- k_cache,
1470
- v_cache,
1471
- k=None,
1472
- v=None,
1473
- rotary_cos=None,
1474
- rotary_sin=None,
1475
- cache_seqlens: Optional[Union[(int, torch.Tensor)]] = None,
1476
- cache_batch_idx: Optional[torch.Tensor] = None,
1477
- cache_leftpad: Optional[torch.Tensor] = None,
1478
- block_table: Optional[torch.Tensor] = None,
1479
- softmax_scale=None,
1480
- causal=False,
1481
- window_size=(-1, -1), # -1 means infinite context window
1482
- softcap=0.0, # 0.0 means deactivated
1483
- rotary_interleaved=True,
1484
- alibi_slopes=None,
1485
- num_splits=0,
1486
- return_softmax_lse=False,
1487
- ):
1488
- """
1489
- If k and v are not None, k_cache and v_cache will be updated *inplace* with the new values from
1490
- k and v. This is useful for incremental decoding: you can pass in the cached keys/values from
1491
- the previous step, and update them with the new keys/values from the current step, and do
1492
- attention with the updated cache, all in 1 kernel.
1493
-
1494
- If you pass in k / v, you must make sure that the cache is large enough to hold the new values.
1495
- For example, the KV cache could be pre-allocated with the max sequence length, and you can use
1496
- cache_seqlens to keep track of the current sequence lengths of each sequence in the batch.
1497
-
1498
- Also apply rotary embedding if rotary_cos and rotary_sin are passed in. The key @k will be
1499
- rotated by rotary_cos and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
1500
- If causal or local (i.e., window_size != (-1, -1)), the query @q will be rotated by rotary_cos
1501
- and rotary_sin at indices cache_seqlens, cache_seqlens + 1, etc.
1502
- If not causal and not local, the query @q will be rotated by rotary_cos and rotary_sin at
1503
- indices cache_seqlens only (i.e. we consider all tokens in @q to be at position cache_seqlens).
1504
-
1505
- See tests/test_flash_attn.py::test_flash_attn_kvcache for examples of how to use this function.
1506
-
1507
- Supports multi-query and grouped-query attention (MQA/GQA) by passing in KV with fewer heads
1508
- than Q. Note that the number of heads in Q must be divisible by the number of heads in KV.
1509
- For example, if Q has 6 heads and K, V have 2 heads, head 0, 1, 2 of Q will attend to head
1510
- 0 of K, V, and head 3, 4, 5 of Q will attend to head 1 of K, V.
1511
-
1512
- If causal=True, the causal mask is aligned to the bottom right corner of the attention matrix.
1513
- For example, if seqlen_q = 2 and seqlen_k = 5, the causal mask (1 = keep, 0 = masked out) is:
1514
- 1 1 1 1 0
1515
- 1 1 1 1 1
1516
- If seqlen_q = 5 and seqlen_k = 2, the causal mask is:
1517
- 0 0
1518
- 0 0
1519
- 0 0
1520
- 1 0
1521
- 1 1
1522
- If the row of the mask is all zero, the output will be zero.
1523
-
1524
- If window_size != (-1, -1), implements sliding window local attention. Query at position i
1525
- will only attend to keys between
1526
- [i + seqlen_k - seqlen_q - window_size[0], i + seqlen_k - seqlen_q + window_size[1]] inclusive.
1527
-
1528
- Note: Does not support backward pass.
1529
-
1530
- Arguments:
1531
- q: (batch_size, seqlen, nheads, headdim)
1532
- k_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
1533
- or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
1534
- page_block_size must be a multiple of 256.
1535
- v_cache: (batch_size_cache, seqlen_cache, nheads_k, headdim) if there's no block_table,
1536
- or (num_blocks, page_block_size, nheads_k, headdim) if there's a block_table (i.e. paged KV cache)
1537
- k [optional]: (batch_size, seqlen_new, nheads_k, headdim). If not None, we concatenate
1538
- k with k_cache, starting at the indices specified by cache_seqlens.
1539
- v [optional]: (batch_size, seqlen_new, nheads_k, headdim). Similar to k.
1540
- rotary_cos [optional]: (seqlen_ro, rotary_dim / 2). If not None, we apply rotary embedding
1541
- to k and q. Only applicable if k and v are passed in. rotary_dim must be divisible by 16.
1542
- rotary_sin [optional]: (seqlen_ro, rotary_dim / 2). Similar to rotary_cos.
1543
- cache_seqlens: int, or (batch_size,), dtype torch.int32. The sequence lengths of the
1544
- KV cache.
1545
- cache_batch_idx: (batch_size,), dtype torch.int32. The indices used to index into the KV cache.
1546
- If None, we assume that the batch indices are [0, 1, 2, ..., batch_size - 1].
1547
- If the indices are not distinct, and k and v are provided, the values updated in the cache
1548
- might come from any of the duplicate indices.
1549
- cache_leftpad: (batch_size,), dtype torch.int32. The index that the KV cache starts. If None, assume 0.
1550
- block_table [optional]: (batch_size, max_num_blocks_per_seq), dtype torch.int32.
1551
- softmax_scale: float. The scaling of QK^T before applying softmax.
1552
- Default to 1 / sqrt(headdim).
1553
- causal: bool. Whether to apply causal attention mask (e.g., for auto-regressive modeling).
1554
- window_size: (left, right). If not (-1, -1), implements sliding window local attention.
1555
- softcap: float. Anything > 0 activates softcapping attention.
1556
- rotary_interleaved: bool. Only applicable if rotary_cos and rotary_sin are passed in.
1557
- If True, rotary embedding will combine dimensions 0 & 1, 2 & 3, etc. If False,
1558
- rotary embedding will combine dimensions 0 & rotary_dim / 2, 1 & rotary_dim / 2 + 1
1559
- (i.e. GPT-NeoX style).
1560
- alibi_slopes: (nheads,) or (batch_size, nheads), fp32. A bias of
1561
- (-alibi_slope * |i + seqlen_k - seqlen_q - j|)
1562
- is added to the attention score of query i and key j.
1563
- num_splits: int. If > 1, split the key/value into this many chunks along the sequence.
1564
- If num_splits == 1, we don't split the key/value. If num_splits == 0, we use a heuristic
1565
- to automatically determine the number of splits.
1566
- Don't change this unless you know what you are doing.
1567
- return_softmax_lse: bool. Whether to return the logsumexp of the attention scores.
1568
-
1569
- Return:
1570
- out: (batch_size, seqlen, nheads, headdim).
1571
- softmax_lse [optional, if return_softmax_lse=True]: (batch_size, nheads, seqlen). The
1572
- logsumexp of each row of the matrix QK^T * scaling (e.g., log of the softmax
1573
- normalization factor).
1574
- """
1575
- assert k_cache.stride(-1) == 1, "k_cache must have contiguous last dimension"
1576
- assert v_cache.stride(-1) == 1, "v_cache must have contiguous last dimension"
1577
- q, k, v = [maybe_contiguous(x) for x in (q, k, v)]
1578
- if softmax_scale is None:
1579
- softmax_scale = q.shape[-1] ** (-0.5)
1580
- if cache_seqlens is not None and isinstance(cache_seqlens, int):
1581
- cache_seqlens = torch.full(
1582
- (k_cache.shape[0],), cache_seqlens, dtype=torch.int32, device=k_cache.device
1583
- )
1584
- cache_seqlens = maybe_contiguous(cache_seqlens)
1585
- cache_batch_idx = maybe_contiguous(cache_batch_idx)
1586
- block_table = maybe_contiguous(block_table)
1587
- out, softmax_lse = flash_attn_gpu.fwd_kvcache(
1588
- q,
1589
- k_cache,
1590
- v_cache,
1591
- k,
1592
- v,
1593
- cache_seqlens,
1594
- rotary_cos,
1595
- rotary_sin,
1596
- cache_batch_idx,
1597
- cache_leftpad,
1598
- block_table,
1599
- alibi_slopes,
1600
- None,
1601
- softmax_scale,
1602
- causal,
1603
- window_size[0],
1604
- window_size[1],
1605
- softcap,
1606
- rotary_interleaved,
1607
- num_splits,
1608
- )
1609
- return (out, softmax_lse) if return_softmax_lse else out
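# Illustrative usage sketch (hypothetical example, not part of the deleted file):
# single-token decoding against a pre-allocated KV cache, as described in the docstring.
# The cache is updated in place with the new k/v before attention. Assumes a CUDA device
# and fp16/bf16 inputs.
import torch

batch, nheads, nheads_k, headdim, max_len = 2, 8, 8, 64, 4096
k_cache = torch.zeros(batch, max_len, nheads_k, headdim, dtype=torch.float16, device="cuda")
v_cache = torch.zeros(batch, max_len, nheads_k, headdim, dtype=torch.float16, device="cuda")
cache_seqlens = torch.tensor([17, 9], dtype=torch.int32, device="cuda")  # tokens already in the cache

q = torch.randn(batch, 1, nheads, headdim, dtype=torch.float16, device="cuda")
k_new = torch.randn(batch, 1, nheads_k, headdim, dtype=torch.float16, device="cuda")
v_new = torch.randn(batch, 1, nheads_k, headdim, dtype=torch.float16, device="cuda")

out = flash_attn_with_kvcache(
    q, k_cache, v_cache, k=k_new, v=v_new, cache_seqlens=cache_seqlens, causal=True
)                                                                          # -> (2, 1, 8, 64)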
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/layers/__init__.py DELETED
File without changes
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/layers/patch_embed.py DELETED
@@ -1,67 +0,0 @@
-# We use the same API as https://github.com/rwightman/pytorch-image-models/blob/v0.6.11/timm/models/layers/patch_embed.py
-# But we use nn.Linear instead of Conv2d and it's about 8x faster.
-
-from functools import partial
-
-import torch.nn as nn
-from einops import rearrange
-from torch import _assert
-from torch.nn.modules.utils import _pair
-
-try:
-    from flash_attn.ops.fused_dense import FusedDense
-except ImportError:
-    FusedDense = None
-
-
-class PatchEmbed(nn.Module):
-    """2D Image to Patch Embedding"""
-
-    def __init__(
-        self,
-        img_size=224,
-        patch_size=16,
-        in_chans=3,
-        embed_dim=768,
-        norm_layer=None,
-        flatten=True,
-        bias=True,
-        fused_bias_fc=False,
-    ):
-        super().__init__()
-        img_size = _pair(img_size)
-        patch_size = _pair(patch_size)
-        self.img_size = img_size
-        self.patch_size = patch_size
-        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
-        self.num_patches = self.grid_size[0] * self.grid_size[1]
-        self.flatten = flatten
-        if fused_bias_fc and FusedDense is None:
-            raise ImportError("fused_dense is not installed")
-
-        linear_cls = nn.Linear if not fused_bias_fc or not bias else FusedDense
-        self.proj = linear_cls(in_chans * patch_size[0] * patch_size[1], embed_dim, bias=bias)
-        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
-
-    def forward(self, x):
-        _, _, H, W = x.shape
-        _assert(
-            H == self.img_size[0],
-            f"Input image height ({H}) doesn't match model ({self.img_size[0]}).",
-        )
-        _assert(
-            W == self.img_size[1],
-            f"Input image width ({W}) doesn't match model ({self.img_size[1]}).",
-        )
-        x = self.proj(
-            rearrange(
-                x,
-                "b c (h p1) (w p2) -> b h w (c p1 p2)",
-                p1=self.patch_size[0],
-                p2=self.patch_size[1],
-            )
-        )
-        if self.flatten:
-            x = rearrange(x, "b h w c -> b (h w) c")
-        x = self.norm(x)
-        return x
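
A hedged usage sketch of the PatchEmbed module deleted above, based only on the constructor and forward shown in the diff; the import path assumes a flash-attn build that still ships flash_attn.layers.patch_embed (and einops installed).

import torch
from flash_attn.layers.patch_embed import PatchEmbed  # only in builds that still ship this file

patch_embed = PatchEmbed(img_size=224, patch_size=16, in_chans=3, embed_dim=768)
x = torch.randn(2, 3, 224, 224)  # (batch, channels, height, width)
tokens = patch_embed(x)          # flattened to (batch, num_patches, embed_dim)
assert tokens.shape == (2, (224 // 16) ** 2, 768)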
 
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/layers/rotary.py DELETED
@@ -1,483 +0,0 @@
1
- # Copyright (c) 2025, Tri Dao
2
-
3
- import math
4
- from functools import partial
5
- from typing import Optional, Tuple, Union
6
-
7
- import torch
8
- from torch import Tensor
9
-
10
- from einops import rearrange, repeat
11
- # from flash_attn.ops.triton.rotary import apply_rotary
12
- from ..ops.triton.rotary import apply_rotary
13
-
14
-
15
- def rotate_half(x, interleaved=False):
16
- if not interleaved:
17
- x1, x2 = x.chunk(2, dim=-1)
18
- return torch.cat((-x2, x1), dim=-1)
19
- else:
20
- x1, x2 = x[..., ::2], x[..., 1::2]
21
- return rearrange(torch.stack((-x2, x1), dim=-1), "... d two -> ... (d two)", two=2)
22
-
23
-
24
- def apply_rotary_emb_torch(x, cos, sin, interleaved=False):
25
- """
26
- x: (batch_size, seqlen, nheads, headdim)
27
- cos, sin: (seqlen, rotary_dim / 2) or (batch_size, seqlen, rotary_dim / 2)
28
- """
29
- ro_dim = cos.shape[-1] * 2
30
- assert ro_dim <= x.shape[-1]
31
- cos = repeat(cos, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)")
32
- sin = repeat(sin, "... d -> ... 1 (2 d)" if not interleaved else "... d -> ... 1 (d 2)")
33
- return torch.cat(
34
- [x[..., :ro_dim] * cos + rotate_half(x[..., :ro_dim], interleaved) * sin, x[..., ro_dim:]],
35
- dim=-1,
36
- )
37
-
38
-
39
- class ApplyRotaryEmb(torch.autograd.Function):
40
- @staticmethod
41
- def forward(
42
- ctx,
43
- x,
44
- cos,
45
- sin,
46
- interleaved=False,
47
- inplace=False,
48
- seqlen_offsets: Union[int, Tensor] = 0,
49
- cu_seqlens: Optional[Tensor] = None,
50
- max_seqlen: Optional[int] = None,
51
- ):
52
- out = apply_rotary(
53
- x,
54
- cos,
55
- sin,
56
- seqlen_offsets=seqlen_offsets,
57
- cu_seqlens=cu_seqlens,
58
- max_seqlen=max_seqlen,
59
- interleaved=interleaved,
60
- inplace=inplace,
61
- )
62
- if isinstance(seqlen_offsets, int):
63
- ctx.save_for_backward(cos, sin, cu_seqlens) # Can't save int with save_for_backward
64
- ctx.seqlen_offsets = seqlen_offsets
65
- else:
66
- ctx.save_for_backward(cos, sin, cu_seqlens, seqlen_offsets)
67
- ctx.seqlen_offsets = None
68
- ctx.interleaved = interleaved
69
- ctx.inplace = inplace
70
- ctx.max_seqlen = max_seqlen
71
- return out if not inplace else x
72
-
73
- @staticmethod
74
- def backward(ctx, do):
75
- seqlen_offsets = ctx.seqlen_offsets
76
- if seqlen_offsets is None:
77
- cos, sin, cu_seqlens, seqlen_offsets = ctx.saved_tensors
78
- else:
79
- cos, sin, cu_seqlens = ctx.saved_tensors
80
- dx = apply_rotary(
81
- do,
82
- cos,
83
- sin,
84
- seqlen_offsets=seqlen_offsets,
85
- cu_seqlens=cu_seqlens,
86
- max_seqlen=ctx.max_seqlen,
87
- interleaved=ctx.interleaved,
88
- inplace=ctx.inplace,
89
- conjugate=True,
90
- )
91
- return dx, None, None, None, None, None, None, None
92
-
93
-
94
- def apply_rotary_emb(
95
- x,
96
- cos,
97
- sin,
98
- interleaved=False,
99
- inplace=False,
100
- seqlen_offsets: Union[int, Tensor] = 0,
101
- cu_seqlens: Optional[Tensor] = None,
102
- max_seqlen: Optional[int] = None,
103
- ):
104
- """
105
- Arguments:
106
- x: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None
107
- else (total_seqlen, nheads, headdim)
108
- cos, sin: (seqlen_rotary, rotary_dim / 2)
109
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead
110
- of 1st half and 2nd half (GPT-NeoX style).
111
- inplace: if True, apply rotary embedding in-place.
112
- seqlen_offsets: (batch_size,) or int. Each sequence in x is shifted by this amount.
113
- Most commonly used in inference when we have KV cache.
114
- cu_seqlens: (batch + 1,) or None
115
- max_seqlen: int
116
- Return:
117
- out: (batch_size, seqlen, nheads, headdim) if cu_seqlens is None
118
- else (total_seqlen, nheads, headdim)
119
- rotary_dim must be <= headdim
120
- Apply rotary embedding to the first rotary_dim of x.
121
- """
122
- return ApplyRotaryEmb.apply(
123
- x, cos, sin, interleaved, inplace, seqlen_offsets, cu_seqlens, max_seqlen
124
- )
125
-
126
-
127
- # For backward compatibility
128
- apply_rotary_emb_func = apply_rotary_emb
129
-
130
-
131
- def _apply_rotary_emb_qkv(
132
- qkv,
133
- cos,
134
- sin,
135
- cos_k=None,
136
- sin_k=None,
137
- interleaved=False,
138
- inplace=False,
139
- conjugate=False,
140
- seqlen_offsets: Union[int, Tensor] = 0,
141
- num_heads_q: Optional[int] = None,
142
- ):
143
- apply_rotary_fn = partial(
144
- apply_rotary,
145
- interleaved=interleaved,
146
- inplace=inplace,
147
- conjugate=conjugate,
148
- seqlen_offsets=seqlen_offsets
149
- )
150
- if cos_k is None and sin_k is None and qkv.is_contiguous():
151
- # Call 1 kernel instead of 2 kernels
152
- # We need qkv to be contiguous so that when we reshape to combine (3, nheads)
153
- # dimensions, we get the same tensor
154
- if qkv.dim() == 5:
155
- batch, seqlen, three, nheads, headdim = qkv.shape
156
- assert three == 3
157
- # qk = rearrange(qkv[:, :, :2], "b s t h d -> b s (t h) d")
158
- qk = qkv[:, :, :2].reshape(batch, seqlen, -1, headdim)
159
- qk = apply_rotary_fn(qk, cos, sin)
160
- else:
161
- assert qkv.dim() == 4
162
- assert num_heads_q is not None
163
- num_heads_k = (qkv.shape[2] - num_heads_q) // 2
164
- assert qkv.shape[2] == num_heads_q + 2 * num_heads_k
165
- qk = qkv[:, :, :num_heads_q + num_heads_k]
166
- qk = apply_rotary_fn(qk, cos, sin)
167
- if not inplace:
168
- if qkv.dim() == 5:
169
- qkv = torch.cat([rearrange(qk, "b s (t h) d -> b s t h d", t=2), qkv[:, :, 2:]], dim=2)
170
- else:
171
- qkv = torch.cat([qk, qkv[:, :, num_heads_q + num_heads_k :]], dim=2)
172
- else:
173
- cos_k = cos if cos_k is None else cos_k
174
- sin_k = sin if sin_k is None else sin_k
175
- if qkv.dim() == 5:
176
- batch, seqlen, three, nheads, headdim = qkv.shape
177
- assert three == 3
178
- q, k = qkv[:, :, 0], qkv[:, :, 1]
179
- else:
180
- assert qkv.dim() == 4
181
- assert num_heads_q is not None
182
- num_heads_k = (qkv.shape[2] - num_heads_q) // 2
183
- assert qkv.shape[2] == num_heads_q + 2 * num_heads_k
184
- q, k = qkv[:, :, :num_heads_q], qkv[:, :, num_heads_q : num_heads_q + num_heads_k]
185
- q = apply_rotary_fn(q, cos, sin)
186
- k = apply_rotary_fn(k, cos_k, sin_k)
187
- if not inplace:
188
- if qkv.dim() == 5:
189
- qkv = torch.stack([q, k, qkv[:, :, 2]], dim=2)
190
- else:
191
- qkv = torch.cat([q, k, qkv[:, :, num_heads_q + num_heads_k:]], dim=2)
192
- return qkv
193
-
194
-
195
- class ApplyRotaryEmbQKV_(torch.autograd.Function):
196
- @staticmethod
197
- def forward(
198
- ctx,
199
- qkv,
200
- cos,
201
- sin,
202
- cos_k=None,
203
- sin_k=None,
204
- interleaved=False,
205
- seqlen_offsets: Union[int, torch.Tensor] = 0,
206
- num_heads_q: Optional[int] = None,
207
- ):
208
- # apply_rotary_emb_qkv_inplace(
209
- qkv = _apply_rotary_emb_qkv(
210
- qkv, cos, sin, cos_k, sin_k, interleaved=interleaved, inplace=True,
211
- seqlen_offsets=seqlen_offsets, num_heads_q=num_heads_q,
212
- )
213
- if isinstance(seqlen_offsets, int):
214
- ctx.save_for_backward(cos, sin, cos_k, sin_k)
215
- ctx.seqlen_offsets = seqlen_offsets
216
- else:
217
- ctx.save_for_backward(cos, sin, cos_k, sin_k, seqlen_offsets)
218
- ctx.seqlen_offsets = None
219
- ctx.interleaved = interleaved
220
- ctx.num_heads_q = num_heads_q
221
- return qkv
222
-
223
- @staticmethod
224
- def backward(ctx, dqkv):
225
- seqlen_offsets = ctx.seqlen_offsets
226
- if seqlen_offsets is None:
227
- cos, sin, cos_k, sin_k, seqlen_offsets = ctx.saved_tensors
228
- else:
229
- cos, sin, cos_k, sin_k = ctx.saved_tensors
230
- dqkv = _apply_rotary_emb_qkv(
231
- dqkv, cos, sin, cos_k, sin_k, interleaved=ctx.interleaved, inplace=True,
232
- seqlen_offsets=seqlen_offsets, num_heads_q=ctx.num_heads_q, conjugate=True,
233
- )
234
- return dqkv, None, None, None, None, None, None, None
235
-
236
-
237
- def apply_rotary_emb_qkv_(
238
- qkv,
239
- cos,
240
- sin,
241
- cos_k=None,
242
- sin_k=None,
243
- interleaved=False,
244
- seqlen_offsets: Union[int, torch.Tensor] = 0,
245
- num_heads_q: Optional[int] = None,
246
- ):
247
- """
248
- Arguments:
249
- qkv: (batch_size, seqlen, 3, nheads, headdim) or (batch_size, seqlen, num_heads_q + 2 * num_heads_k, headdim).
250
- If qkv has shape (batch_size, seqlen, num_heads_q + 2 * num_heads_k, headdim) (e.g. MQA / GQA),
251
- then num_heads_q must be provided.
252
- cos, sin: (seqlen, rotary_dim / 2)
253
- cos_k, sin_k: (seqlen, rotary_dim / 2), optional
254
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead of
255
- 1st half and 2nd half (GPT-NeoX style).
256
- seqlen_offsets: (batch_size,) or int. Each sequence in Q and K is shifted by this amount.
257
- Most commonly used in inference when we have KV cache.
258
- Return:
259
- qkv: (batch_size, seqlen, 3, nheads, headdim) or (batch_size, seqlen, num_heads_q + 2 * num_heads_k, headdim)
260
- rotary_dim must be <= headdim
261
- Apply rotary embedding *inplace* to the first rotary_dim of Q and K.
262
- """
263
- return ApplyRotaryEmbQKV_.apply(
264
- qkv, cos, sin, cos_k, sin_k, interleaved, seqlen_offsets, num_heads_q
265
- )
266
-
267
-
268
- class ApplyRotaryEmbKV_(torch.autograd.Function):
269
-
270
- @staticmethod
271
- def forward(ctx, kv, cos, sin, interleaved=False, seqlen_offsets: Union[int, torch.Tensor] = 0):
272
- batch, seqlen, two, nheads, headdim = kv.shape
273
- assert two == 2
274
- k = kv[:, :, 0]
275
- apply_rotary(
276
- k, cos, sin, seqlen_offsets=seqlen_offsets, interleaved=interleaved, inplace=True
277
- )
278
- if isinstance(seqlen_offsets, int):
279
- ctx.save_for_backward(cos, sin) # Can't save int with save_for_backward
280
- ctx.seqlen_offsets = seqlen_offsets
281
- else:
282
- ctx.save_for_backward(cos, sin, seqlen_offsets)
283
- ctx.seqlen_offsets = None
284
- ctx.interleaved = interleaved
285
- return kv
286
-
287
- @staticmethod
288
- def backward(ctx, dkv):
289
- seqlen_offsets = ctx.seqlen_offsets
290
- if seqlen_offsets is None:
291
- cos, sin, seqlen_offsets = ctx.saved_tensors
292
- else:
293
- cos, sin = ctx.saved_tensors
294
- apply_rotary(
295
- dkv[:, :, 0],
296
- cos,
297
- sin,
298
- seqlen_offsets=seqlen_offsets,
299
- interleaved=ctx.interleaved,
300
- inplace=True,
301
- conjugate=True,
302
- )
303
- return dkv, None, None, None, None
304
-
305
-
306
- apply_rotary_emb_kv_ = ApplyRotaryEmbKV_.apply
307
-
308
-
309
- def apply_rotary_emb_kv_(
310
- kv,
311
- cos,
312
- sin,
313
- interleaved=False,
314
- seqlen_offsets: Union[int, torch.Tensor] = 0,
315
- ):
316
- """
317
- Arguments:
318
- kv: (batch_size, seqlen, 2, nheads, headdim)
319
- cos, sin: (seqlen, rotary_dim / 2)
320
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead of
321
- 1st half and 2nd half (GPT-NeoX style).
322
- seqlen_offsets: (batch_size,) or int. Each sequence in Q and K is shifted by this amount.
323
- Most commonly used in inference when we have KV cache.
324
- Return:
325
- kv: (batch_size, seqlen, 2, nheads, headdim)
326
- rotary_dim must be <= headdim
327
- Apply rotary embedding *inplace* to the first rotary_dim of K.
328
- """
329
- return ApplyRotaryEmbKV_.apply(kv, cos, sin, interleaved, seqlen_offsets)
330
-
331
-
332
- class RotaryEmbedding(torch.nn.Module):
333
- """
334
- The rotary position embeddings from RoFormer_ (Su et. al).
335
- A crucial insight from the method is that the query and keys are
336
- transformed by rotation matrices which depend on the relative positions.
337
-
338
- Other implementations are available in the Rotary Transformer repo_ and in
339
- GPT-NeoX_, GPT-NeoX was an inspiration
340
-
341
- .. _RoFormer: https://arxiv.org/abs/2104.09864
342
- .. _repo: https://github.com/ZhuiyiTechnology/roformer
343
- .. _GPT-NeoX: https://github.com/EleutherAI/gpt-neox
344
-
345
- If scale_base is not None, this implements XPos (Sun et al., https://arxiv.org/abs/2212.10554).
346
- A recommended value for scale_base is 512: https://github.com/HazyResearch/flash-attention/issues/96
347
- Reference: https://github.com/sunyt32/torchscale/blob/main/torchscale/component/xpos_relative_position.py
348
- """
349
-
350
- def __init__(
351
- self,
352
- dim: int,
353
- base=10000.0,
354
- interleaved=False,
355
- scale_base=None,
356
- device=None,
357
- ):
358
- """
359
- interleaved: if True, rotate pairs of even and odd dimensions (GPT-J style) instead
360
- of 1st half and 2nd half (GPT-NeoX style).
361
- """
362
- super().__init__()
363
- self.dim = dim
364
- self.base = float(base)
365
- # Generate and save the inverse frequency buffer (non trainable)
366
- inv_freq = self._compute_inv_freq(device)
367
- self.register_buffer("inv_freq", inv_freq, persistent=False)
368
- self.interleaved = interleaved
369
- self.scale_base = scale_base
370
- scale = (
371
- (torch.arange(0, dim, 2, device=device, dtype=torch.float32) + 0.4 * dim) / (1.4 * dim)
372
- if scale_base is not None
373
- else None
374
- )
375
- self.register_buffer("scale", scale, persistent=False)
376
-
377
- self._seq_len_cached = 0
378
- self._cos_cached = None
379
- self._sin_cached = None
380
- self._cos_k_cached = None
381
- self._sin_k_cached = None
382
-
383
- def _compute_inv_freq(self, device=None):
384
- return 1.0 / (
385
- self.base
386
- ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim)
387
- )
388
-
389
- def _update_cos_sin_cache(self, seqlen, device=None, dtype=None):
390
- # Reset the tables if the sequence length has changed,
391
- # if we're on a new device (possibly due to tracing for instance),
392
- # or if we're switching from inference mode to training
393
- if (
394
- seqlen > self._seq_len_cached
395
- or self._cos_cached is None
396
- or self._cos_cached.device != device
397
- or self._cos_cached.dtype != dtype
398
- or (self.training and self._cos_cached.is_inference())
399
- ):
400
- self._seq_len_cached = seqlen
401
- # We want fp32 here, not self.inv_freq.dtype, since the model could be loaded in bf16
402
- # And the output of arange can be quite large, so bf16 would lose a lot of precision.
403
- t = torch.arange(seqlen, device=device, dtype=torch.float32)
404
- # We want fp32 here as well since inv_freq will be multiplied with t, and the output
405
- # will be large. Having it in bf16 will lose a lot of precision and cause the
406
- # cos & sin output to change significantly.
407
- # We want to recompute self.inv_freq if it was not loaded in fp32
408
- if self.inv_freq.dtype != torch.float32:
409
- inv_freq = self._compute_inv_freq(device=device)
410
- else:
411
- inv_freq = self.inv_freq
412
- # Don't do einsum, it converts fp32 to bf16 under AMP
413
- # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
414
- freqs = torch.outer(t, inv_freq)
415
- if self.scale is None:
416
- self._cos_cached = torch.cos(freqs).to(dtype)
417
- self._sin_cached = torch.sin(freqs).to(dtype)
418
- else:
419
- power = (
420
- torch.arange(seqlen, dtype=self.scale.dtype, device=self.scale.device)
421
- - seqlen // 2
422
- ) / self.scale_base
423
- scale = self.scale.to(device=power.device) ** rearrange(power, "s -> s 1")
424
- # We want the multiplication by scale to happen in fp32
425
- self._cos_cached = (torch.cos(freqs) * scale).to(dtype)
426
- self._sin_cached = (torch.sin(freqs) * scale).to(dtype)
427
- self._cos_k_cached = (torch.cos(freqs) / scale).to(dtype)
428
- self._sin_k_cached = (torch.sin(freqs) / scale).to(dtype)
429
-
430
- def forward(
431
- self,
432
- qkv: torch.Tensor,
433
- kv: Optional[torch.Tensor] = None,
434
- seqlen_offset: Union[int, torch.Tensor] = 0,
435
- max_seqlen: Optional[int] = None,
436
- num_heads_q: Optional[int] = None,
437
- ) -> Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]]:
438
- """
439
- qkv: (batch, seqlen, 3, nheads, headdim) or (batch, seqlen, num_heads_q + 2 * num_heads_k, headdim)
440
- if kv is none, else it's just q of shape (batch, seqlen, nheads, headdim).
441
- If qkv has shape (batch, seqlen, num_heads_q + 2 * num_heads_k, headdim) (e.g. MQA / GQA),
442
- then num_heads_q must be provided.
443
- kv: (batch, seqlen, 2, nheads, headdim)
444
- seqlen_offset: (batch_size,) or int. Each sequence in x is shifted by this amount.
445
- Most commonly used in inference when we have KV cache.
446
- If it's a tensor of shape (batch_size,), then to update the cos / sin cache, one
447
- should pass in max_seqlen, which will update the cos / sin cache up to that length.
448
- Apply rotary embedding *inplace* to qkv and / or kv.
449
- """
450
- seqlen = qkv.shape[1]
451
- if max_seqlen is not None:
452
- self._update_cos_sin_cache(max_seqlen, device=qkv.device, dtype=qkv.dtype)
453
- elif isinstance(seqlen_offset, int):
454
- self._update_cos_sin_cache(seqlen + seqlen_offset, device=qkv.device, dtype=qkv.dtype)
455
- if kv is None:
456
- return apply_rotary_emb_qkv_(
457
- qkv,
458
- self._cos_cached,
459
- self._sin_cached,
460
- self._cos_k_cached if self.scale is not None else None,
461
- self._sin_k_cached if self.scale is not None else None,
462
- interleaved=self.interleaved,
463
- seqlen_offsets=seqlen_offset,
464
- num_heads_q=num_heads_q,
465
- )
466
- else:
467
- q = qkv
468
- q = apply_rotary_emb_func(
469
- q,
470
- self._cos_cached,
471
- self._sin_cached,
472
- interleaved=self.interleaved,
473
- inplace=True,
474
- seqlen_offsets=seqlen_offset,
475
- )
476
- kv = apply_rotary_emb_kv_(
477
- kv,
478
- self._cos_cached if self.scale is None else self._cos_k_cached,
479
- self._sin_cached if self.scale is None else self._sin_k_cached,
480
- interleaved=self.interleaved,
481
- seqlen_offsets=seqlen_offset,
482
- )
483
- return q, kv
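
Since the whole rotary module is dropped from this build, here is a hedged usage sketch of the API it exposed, taken from the signatures and docstrings in the diff above. It assumes a build that still ships flash_attn.layers.rotary and a CUDA device, because apply_rotary is a Triton kernel.

import torch
from flash_attn.layers.rotary import RotaryEmbedding  # only in builds that still ship this file

batch, seqlen, nheads, headdim = 2, 128, 16, 64
rotary = RotaryEmbedding(dim=headdim, interleaved=False, device="cuda")
qkv = torch.randn(batch, seqlen, 3, nheads, headdim, device="cuda", dtype=torch.float16)
qkv = rotary(qkv, seqlen_offset=0)  # rotary embedding applied in-place to the first rotary_dim of q and k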
 
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/ops/__init__.py DELETED
File without changes
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/ops/activations.py DELETED
@@ -1,135 +0,0 @@
-# Copied from https://github.com/mlcommons/training_results_v1.1/blob/main/NVIDIA/benchmarks/bert/implementations/pytorch/model/layers/activations.py
-import math
-
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-
-# 1/sqrt(2*pi)-> 0.3989423
-# 1/sqrt(2) -> 0.70710678
-# sqrt(2/pi) -> 0.79788456
-
-# this function is tanh approximation of gelu
-# actual gelu is:
-# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
-@torch.jit.script
-def bias_gelu(y, bias):
-    x = bias + y
-    return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=y.dtype)
-
-
-# gradient of tanh approximation of gelu
-# gradient of actual gelu is:
-# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
-@torch.jit.script
-def bias_gelu_back(g, y, bias):
-    """Assume that y has shape (B, D) and bias has shape (D)"""
-    x = bias + y
-    tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
-    # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
-    ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
-        1 + tanh_out
-    )
-    grad_y = ff * g
-    return grad_y.to(dtype=y.dtype), grad_y.sum(dim=(0), dtype=bias.dtype)
-
-
-class GeLUFunction(torch.autograd.Function):
-    @staticmethod
-    # bias is an optional argument
-    def forward(ctx, input, bias):
-        ctx.save_for_backward(input, bias)
-        return bias_gelu(input, bias)
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        input, bias = ctx.saved_tensors
-        tmp = bias_gelu_back(grad_output, input, bias)
-        return tmp, tmp
-
-
-bias_gelu_impl = GeLUFunction.apply
-
-# this function is tanh approximation of gelu
-# actual gelu is:
-# x * 0.5 * (1.0 + torch.erf(x * 0.70710678))
-@torch.jit.script
-def gelu_fwd(x):
-    return (x * 0.5 * (1.0 + torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x)))).to(dtype=x.dtype)
-
-
-# gradient of tanh approximation of gelu
-# gradient of actual gelu is:
-# 0.5 * (1. + torch.erf(x * 0.70710678)) + 0.3989423 * x * torch.exp(-0.5 * x * x)
-@torch.jit.script
-def gelu_bwd(g, x):
-    tanh_out = torch.tanh(0.79788456 * x * (1 + 0.044715 * x * x))
-    # sqrt(2/pi) * 3 * 0.044715 -> 0.1070322243
-    ff = 0.5 * x * ((1 - tanh_out * tanh_out) * (0.79788456 + 0.1070322243 * x * x)) + 0.5 * (
-        1 + tanh_out
-    )
-    return (ff * g).to(dtype=x.dtype)
-
-
-class FastGeLUFunction(torch.autograd.Function):
-    @staticmethod
-    # bias is an optional argument
-    def forward(ctx, input):
-        ctx.save_for_backward(input)
-        return gelu_fwd(input)
-
-    @staticmethod
-    def backward(ctx, grad_output):
-        (input,) = ctx.saved_tensors
-        tmp = gelu_bwd(grad_output, input)
-        return tmp
-
-
-fast_gelu_impl = FastGeLUFunction.apply
-
-
-@torch.jit.script
-def relu_bwd(g, x):
-    return torch.where(x >= 0, g, 0.0).to(dtype=x.dtype)
-
-
-@torch.jit.script
-def sqrelu_fwd(x):
-    r = F.relu(x)
-    return (r * r).to(dtype=x.dtype)
-
-
-@torch.jit.script
-def sqrelu_bwd(g, x):
-    return (2.0 * g * F.relu(x)).to(dtype=x.dtype)
-
-
-swiglu_fwd_codestring = """
-template <typename T> T swiglu_fwd(T x, T y) {
-    return float(x) * float(y) / (1.0f + ::exp(-float(x)));
-}
-"""
-swiglu_bwd_codestring = """
-template <typename T> void swiglu_bwd(T x, T y, T g, T& dx, T& dy) {
-    float x_sigmoid = 1.0f / (1.0f + ::exp(-float(x)));
-    dx = x_sigmoid * (1 + float(x) * (1.0f - x_sigmoid)) * float(g) * float(y);
-    dy = float(x) * x_sigmoid * float(g);
-}
-"""
-swiglu_fwd = torch.cuda.jiterator._create_jit_fn(swiglu_fwd_codestring)
-swiglu_bwd = torch.cuda.jiterator._create_multi_output_jit_fn(swiglu_bwd_codestring, num_outputs=2)
-
-
-class SwiGLUFunction(torch.autograd.Function):
-
-    @staticmethod
-    def forward(ctx, x, y):
-        ctx.save_for_backward(x, y)
-        return swiglu_fwd(x, y)
-
-    @staticmethod
-    def backward(ctx, dout):
-        x, y = ctx.saved_tensors
-        return swiglu_bwd(x, y, dout)
-
-swiglu = SwiGLUFunction.apply
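
The squared-ReLU pair removed above has a simple closed form, so a plain-PyTorch check (illustrative only; no flash-attn import needed) verifies the hand-written backward against autograd.

import torch
import torch.nn.functional as F

x = torch.randn(4, 8, requires_grad=True)
g = torch.ones_like(x)

y = F.relu(x) ** 2            # same computation as sqrelu_fwd(x)
(y * g).sum().backward()
manual = 2.0 * g * F.relu(x)  # same computation as sqrelu_bwd(g, x)
assert torch.allclose(x.grad, manual)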
 
build/torch26-cxx98-cu126-x86_64-linux/flash_attn/ops/fused_dense.py DELETED
@@ -1,688 +0,0 @@
1
- # Copyright (c) 2023, Tri Dao.
2
- # Inspired by https://github.com/NVIDIA/apex/blob/master/apex/fused_dense/fused_dense.py
3
- # We make it work with pytorch amp and with bfloat16.
4
- # The TensorParallel linear modules are inspired by https://github.com/NVIDIA/apex/blob/master/apex/transformer/tensor_parallel/layers.py
5
- from functools import partial
6
- from typing import Optional
7
-
8
- # import fused_dense_cuda # from apex
9
- import fused_dense_lib as fused_dense_cuda
10
- import torch
11
- import torch.nn as nn
12
- import torch.nn.functional as F
13
- from torch import Tensor
14
- from torch.distributed import ProcessGroup
15
-
16
- from flash_attn.utils.torch import custom_fwd, custom_bwd
17
- from flash_attn.ops.activations import gelu_bwd, relu_bwd, sqrelu_bwd, sqrelu_fwd
18
- from flash_attn.utils.distributed import (
19
- all_gather_raw,
20
- all_reduce,
21
- all_reduce_raw,
22
- reduce_scatter,
23
- reduce_scatter_raw,
24
- )
25
-
26
-
27
- class FusedDenseFunc(torch.autograd.Function):
28
- @staticmethod
29
- @custom_fwd
30
- def forward(
31
- ctx, x, weight, bias, return_residual=False, process_group=None, sequence_parallel=True
32
- ):
33
- """
34
- If process_group is not None and sequence_parallel=True, we're doing Tensor Parallel
35
- with sequence parallelism: we do an all_gather_raw of x before doing the matmul.
36
- """
37
- ctx.compute_weight_gradient = weight.requires_grad
38
- ctx.return_residual = return_residual
39
- ctx.process_group = process_group
40
- ctx.sequence_parallel = sequence_parallel
41
-
42
- if torch.is_autocast_enabled():
43
- x = x.to(dtype=torch.get_autocast_gpu_dtype())
44
- x = x.contiguous()
45
- if process_group is not None and sequence_parallel:
46
- # We want to kick off the all_gather early, before weight dtype conversion
47
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
48
- else:
49
- total_x = x
50
-
51
- if torch.is_autocast_enabled():
52
- weight = weight.to(dtype=torch.get_autocast_gpu_dtype())
53
- bias = bias.to(dtype=torch.get_autocast_gpu_dtype()) if bias is not None else None
54
- weight = weight.contiguous()
55
- if process_group is not None and sequence_parallel:
56
- handle_x.wait()
57
- batch_shape, n = total_x.shape[:-1], total_x.shape[-1]
58
- batch_dim = batch_shape.numel()
59
- # https://github.com/pytorch/pytorch/blob/5b51849b48a7dbccd297286cc0110def4706f9e7/aten/src/ATen/native/cuda/Blas.cpp#L174
60
- if min(batch_dim, n, *weight.shape) > 65535 * 32:
61
- raise RuntimeError("fused_dense only supports matrix dims <= 2M")
62
- output = F.linear(total_x, weight, bias)
63
- if ctx.compute_weight_gradient:
64
- ctx.save_for_backward(x, weight)
65
- else:
66
- ctx.save_for_backward(weight)
67
- return output if not return_residual else (output, x)
68
-
69
- @staticmethod
70
- @custom_bwd
71
- def backward(ctx, grad_output, *args):
72
- grad_output = grad_output.contiguous()
73
- if ctx.return_residual:
74
- (grad_input,) = args
75
- grad_input = grad_input.contiguous()
76
- process_group = ctx.process_group
77
- sequence_parallel = ctx.sequence_parallel
78
- if ctx.compute_weight_gradient:
79
- x, weight = ctx.saved_tensors
80
- if process_group is not None and sequence_parallel:
81
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
82
- else:
83
- total_x = x
84
- else:
85
- (weight,) = ctx.saved_tensors
86
- total_x = None
87
- batch_shape = grad_output.shape[:-1]
88
- batch_dim = batch_shape.numel()
89
- grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
90
- if ctx.needs_input_grad[0]:
91
- if not ctx.return_residual:
92
- grad_input = F.linear(grad_output, weight.t())
93
- else:
94
- grad_input = torch.addmm(
95
- grad_input.reshape(batch_dim, grad_input.shape[-1]), grad_output, weight
96
- )
97
- grad_input = grad_input.reshape(*batch_shape, grad_input.shape[-1])
98
- if process_group is not None:
99
- reduce_fn = reduce_scatter_raw if sequence_parallel else all_reduce_raw
100
- grad_input, handle_grad_input = reduce_fn(grad_input, process_group, async_op=True)
101
- else:
102
- grad_input = None
103
- if ctx.needs_input_grad[1]:
104
- assert ctx.compute_weight_gradient
105
- if process_group is not None and sequence_parallel:
106
- handle_x.wait()
107
- grad_weight, grad_bias = fused_dense_cuda.linear_bias_wgrad(
108
- total_x.reshape(batch_dim, total_x.shape[-1]), grad_output, ctx.needs_input_grad[2]
109
- )
110
- else:
111
- grad_weight = None
112
- grad_bias = grad_output if ctx.needs_input_grad[2] else None
113
- if process_group is not None and ctx.needs_input_grad[0]:
114
- handle_grad_input.wait()
115
- return grad_input, grad_weight, grad_bias, None, None, None
116
-
117
-
118
- def fused_dense_func(
119
- x: Tensor,
120
- weight: Tensor,
121
- bias: Optional[Tensor] = None,
122
- return_residual: bool = False,
123
- process_group: Optional[ProcessGroup] = None,
124
- sequence_parallel: bool = True,
125
- ):
126
- dtype_eligible = x.dtype in [torch.float16, torch.bfloat16] or (
127
- x.dtype == torch.float32 and torch.is_autocast_enabled()
128
- )
129
- if x.is_cuda and weight.is_cuda and (bias is None or bias.is_cuda) and dtype_eligible:
130
- return FusedDenseFunc.apply(
131
- x, weight, bias, return_residual, process_group, sequence_parallel
132
- )
133
- else:
134
- assert process_group is None
135
- out = F.linear(x, weight, bias)
136
- return out if not return_residual else (out, x)
137
-
138
-
139
- class FusedDense(nn.Linear):
140
- def __init__(
141
- self,
142
- in_features: int,
143
- out_features: int,
144
- bias: bool = True,
145
- return_residual: bool = False,
146
- device=None,
147
- dtype=None,
148
- ) -> None:
149
- super().__init__(in_features, out_features, bias=bias, device=device, dtype=dtype)
150
- self.return_residual = return_residual
151
-
152
- def forward(self, x, process_group=None):
153
- """
154
- If process_group is not None, we're doing Tensor Parallel with sequence parallelism:
155
- we do an all_gather of x before doing the matmul.
156
- """
157
- return fused_dense_func(
158
- x,
159
- self.weight,
160
- self.bias,
161
- return_residual=self.return_residual,
162
- process_group=process_group,
163
- )
164
-
165
-
166
- class ColumnParallelLinear(nn.Linear):
167
- def __init__(
168
- self,
169
- in_features: int,
170
- out_features: int,
171
- process_group: ProcessGroup,
172
- bias: bool = True,
173
- sequence_parallel=True,
174
- multiple_of=1,
175
- device=None,
176
- dtype=None,
177
- ) -> None:
178
- world_size = torch.distributed.get_world_size(process_group)
179
- if out_features % multiple_of:
180
- raise ValueError(f"out_features ({out_features}) must be a multiple of {multiple_of}")
181
- multiple = out_features // multiple_of
182
- # We want to split @multiple across world_size, but it could be an uneven split
183
- div = multiple // world_size
184
- mod = multiple % world_size
185
- # The first @mod ranks get @div + 1 copies, the rest get @div copies
186
- local_multiple = div + int(torch.distributed.get_rank(process_group) < mod)
187
- super().__init__(
188
- in_features, local_multiple * multiple_of, bias=bias, device=device, dtype=dtype
189
- )
190
- self.process_group = process_group
191
- self.sequence_parallel = sequence_parallel
192
-
193
- def forward(self, x):
194
- # If self.sequence_parallel is True, we're doing Tensor Parallel with sequence parallelism:
195
- # we do an all_gather of x before doing the matmul.
196
- # If not, then the input is already gathered.
197
- return fused_dense_func(
198
- x,
199
- self.weight,
200
- self.bias,
201
- process_group=self.process_group,
202
- sequence_parallel=self.sequence_parallel,
203
- )
204
-
205
-
206
- class RowParallelLinear(nn.Linear):
207
- def __init__(
208
- self,
209
- in_features: int,
210
- out_features: int,
211
- process_group: ProcessGroup,
212
- bias: bool = True,
213
- sequence_parallel=True,
214
- multiple_of=1,
215
- device=None,
216
- dtype=None,
217
- ) -> None:
218
- world_size = torch.distributed.get_world_size(process_group)
219
- rank = torch.distributed.get_rank(process_group)
220
- if in_features % multiple_of:
221
- raise ValueError(f"in_features ({in_features}) must be a multiple of {multiple_of}")
222
- multiple = in_features // multiple_of
223
- # We want to split @multiple across world_size, but it could be an uneven split
224
- div = multiple // world_size
225
- mod = multiple % world_size
226
- # The first @mod ranks get @div + 1 copies, the rest get @div copies
227
- local_multiple = div + int(torch.distributed.get_rank(process_group) < mod)
228
- # Only rank 0 will have bias
229
- super().__init__(
230
- local_multiple * multiple_of,
231
- out_features,
232
- bias=bias and rank == 0,
233
- device=device,
234
- dtype=dtype,
235
- )
236
- self.process_group = process_group
237
- self.sequence_parallel = sequence_parallel
238
-
239
- def forward(self, x):
240
- """
241
- We're doing Tensor Parallel with sequence parallelism: we do the matmul and then
242
- a reduce_scatter of the result.
243
- """
244
- out = fused_dense_func(x, self.weight, self.bias)
245
- reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce
246
- return reduce_fn(out, self.process_group)
247
-
248
-
249
- class FusedMLPFunc(torch.autograd.Function):
250
- @staticmethod
251
- @custom_fwd
252
- def forward(
253
- ctx,
254
- x,
255
- weight1,
256
- bias1,
257
- weight2,
258
- bias2,
259
- activation="gelu_approx",
260
- save_pre_act=True,
261
- return_residual=False,
262
- checkpoint_lvl=0,
263
- heuristic=0,
264
- process_group=None,
265
- sequence_parallel=True,
266
- ):
267
- """
268
- If process_group is not None and sequence_parallel=True, we're doing Tensor Parallel
269
- with sequence parallelism: we do an all_gather of x before doing the matmul.
270
- If sequence_parallel=False, then the input is already gathered.
271
-
272
- checkpoint_lvl:
273
- 0: no recomputation in the bwd
274
- 1: recompute gelu_out / relu_out in the bwd
275
- 2: recompute pre_act and gelu_out / relu_out in the bwd
276
- """
277
- assert -1 <= heuristic <= 4
278
- assert activation in ["gelu_approx", "relu", "sqrelu"]
279
- if activation == "sqrelu":
280
- assert heuristic == -1
281
- if not save_pre_act:
282
- checkpoint_lvl = 2
283
- assert checkpoint_lvl in [0, 1, 2]
284
- ctx.return_residual = return_residual
285
- ctx.process_group = process_group
286
- ctx.sequence_parallel = sequence_parallel
287
- ctx.checkpoint_lvl = checkpoint_lvl
288
- ctx.activation = activation
289
- ctx.heuristic = heuristic
290
-
291
- if torch.is_autocast_enabled():
292
- x = x.to(dtype=torch.get_autocast_gpu_dtype())
293
- x = x.contiguous()
294
- if process_group is not None and sequence_parallel:
295
- # We want to kick off the all_gather early, before weight dtype conversion
296
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
297
- else:
298
- total_x = x
299
-
300
- if torch.is_autocast_enabled():
301
- dtype = torch.get_autocast_gpu_dtype()
302
- weight1, weight2 = [a.to(dtype=dtype) for a in [weight1, weight2]]
303
- bias1 = bias1.to(dtype=dtype) if bias1 is not None else None
304
- bias2 = bias2.to(dtype=dtype) if bias2 is not None else None
305
- weight1 = weight1.contiguous()
306
- bias1 = bias1.contiguous() if bias1 is not None else None
307
- weight2 = weight2.contiguous()
308
- bias2 = bias2.contiguous() if bias2 is not None else None
309
- if process_group is not None and sequence_parallel:
310
- handle_x.wait()
311
- batch_shape, n = total_x.shape[:-1], total_x.shape[-1]
312
- batch_dim = batch_shape.numel()
313
- # https://github.com/pytorch/pytorch/blob/5b51849b48a7dbccd297286cc0110def4706f9e7/aten/src/ATen/native/cuda/Blas.cpp#L174
314
- if min(batch_dim, n, *weight1.shape, *weight2.shape) > 65535 * 32:
315
- raise RuntimeError("fused_dense only supports matrix dims <= 2M")
316
- if heuristic == -1:
317
- pre_act = F.linear(total_x, weight1, bias1)
318
- activation_fn = (
319
- partial(F.gelu, approximate="tanh")
320
- if activation == "gelu_approx"
321
- else (sqrelu_fwd if activation == "sqrelu" else F.relu)
322
- )
323
- with torch.jit.fuser("fuser2"):
324
- output1 = activation_fn(pre_act)
325
- # This is before adding bias1
326
- # pre_act = F.linear(total_x.reshape(batch_dim, n), weight1)
327
- # with torch.jit.fuser('fuser2'):
328
- # output1 = bias_gelu(pre_act, bias1)
329
- else:
330
- is_gelu = activation == "gelu_approx"
331
- output1, *rest = fused_dense_cuda.linear_act_forward(
332
- total_x.reshape(batch_dim, n), weight1, bias1, is_gelu, save_pre_act, heuristic
333
- )
334
- if save_pre_act:
335
- pre_act = rest[0]
336
- output2 = F.linear(output1, weight2, bias2)
337
- if checkpoint_lvl == 0 or (checkpoint_lvl == 1 and activation == "relu"):
338
- # For RELU the pre_act is very small (just a bit-mask) so we just save it
339
- ctx.save_for_backward(x, weight1, weight2, pre_act, output1)
340
- elif checkpoint_lvl == 1:
341
- ctx.save_for_backward(x, weight1, weight2, pre_act)
342
- elif checkpoint_lvl == 2:
343
- ctx.save_for_backward(x, weight1, weight2, bias1)
344
- output2 = output2.reshape(*batch_shape, output2.shape[-1])
345
- return output2 if not return_residual else (output2, x)
346
-
347
- @staticmethod
348
- @custom_bwd
349
- def backward(ctx, grad_output, *args):
350
- grad_output = grad_output.contiguous()
351
- checkpoint_lvl = ctx.checkpoint_lvl
352
- activation = ctx.activation
353
- activation_fn = (
354
- partial(F.gelu, approximate="tanh")
355
- if activation == "gelu_approx"
356
- else (sqrelu_fwd if activation == "sqrelu" else F.relu)
357
- )
358
- if ctx.return_residual:
359
- (grad_input,) = args
360
- grad_input = grad_input.contiguous()
361
- process_group = ctx.process_group
362
- sequence_parallel = ctx.sequence_parallel
363
- x, weight1, weight2, *rest = ctx.saved_tensors
364
- if process_group is None or not sequence_parallel:
365
- total_x = x
366
- batch_shape = grad_output.shape[:-1]
367
- batch_dim = batch_shape.numel()
368
- if checkpoint_lvl in [0, 1]:
369
- if process_group is not None and sequence_parallel:
370
- total_x, handle_x = all_gather_raw(x, process_group, async_op=True)
371
- if checkpoint_lvl == 0 or (checkpoint_lvl == 1 and activation == "relu"):
372
- pre_act, output1 = rest
373
- elif checkpoint_lvl == 1:
374
- (pre_act,) = rest
375
- with torch.jit.fuser("fuser2"):
376
- output1 = activation_fn(pre_act)
377
- elif checkpoint_lvl == 2:
378
- (bias1,) = rest
379
- if process_group is not None and sequence_parallel:
380
- total_x, _ = all_gather_raw(x, process_group)
381
- if ctx.heuristic == -1:
382
- pre_act = F.linear(total_x, weight1, bias1)
383
- with torch.jit.fuser("fuser2"):
384
- output1 = activation_fn(pre_act)
385
- else:
386
- output1, pre_act = fused_dense_cuda.linear_act_forward(
387
- total_x.reshape(batch_dim, total_x.shape[-1]),
388
- weight1,
389
- bias1,
390
- activation == "gelu_approx",
391
- True,
392
- ctx.heuristic,
393
- )
394
-
395
- grad_output = grad_output.reshape(batch_dim, grad_output.shape[-1])
396
- output1 = output1.reshape(batch_dim, output1.shape[-1])
397
- pre_act = pre_act.reshape(batch_dim, pre_act.shape[-1])
398
- if ctx.needs_input_grad[3]:
399
- grad_weight2, grad_bias2 = fused_dense_cuda.linear_bias_wgrad(
400
- output1, grad_output, ctx.needs_input_grad[4]
401
- )
402
- else:
403
- grad_weight2 = None
404
- grad_bias2 = grad_output if ctx.needs_input_grad[4] else None
405
- if ctx.heuristic == -1:
406
- # grad_pre_act = matmul_dgelu(grad_output, weight2, pre_act)
407
- grad_output1 = F.linear(grad_output, weight2.t())
408
- activation_grad_fn = (
409
- gelu_bwd
410
- if activation == "gelu_approx"
411
- else (sqrelu_bwd if activation == "sqrelu" else relu_bwd)
412
- )
413
- with torch.jit.fuser("fuser2"):
414
- grad_pre_act = activation_grad_fn(grad_output1, pre_act)
415
- else:
416
- # The cublasLt epilogue has to compute both gelu/relu grad and bias grad, we can't
417
- # just compute gelu/relu grad
418
- grad_pre_act, grad_bias1 = fused_dense_cuda.bias_act_linear_dgrad_bgrad(
419
- weight2, grad_output, pre_act, activation == "gelu_approx", ctx.heuristic
420
- )
421
- if not ctx.needs_input_grad[2]:
422
- grad_bias1 = None
423
- if ctx.needs_input_grad[0]:
424
- if not ctx.return_residual:
425
- grad_input = F.linear(grad_pre_act, weight1.t())
426
- else:
427
- grad_input = torch.addmm(
428
- grad_input.reshape(batch_dim, grad_input.shape[-1]), grad_pre_act, weight1
429
- )
430
- grad_input = grad_input.reshape(*batch_shape, grad_input.shape[-1])
431
- if process_group is not None:
432
- reduce_fn = reduce_scatter_raw if sequence_parallel else all_reduce_raw
433
- grad_input, handle_grad_input = reduce_fn(grad_input, process_group, async_op=True)
434
- else:
435
- grad_input = None
436
- if ctx.heuristic == -1:
437
- if ctx.needs_input_grad[1]:
438
- if process_group is not None and sequence_parallel and checkpoint_lvl != 2:
439
- handle_x.wait()
440
- grad_weight1, grad_bias1 = fused_dense_cuda.linear_bias_wgrad(
441
- total_x.reshape(batch_dim, total_x.shape[-1]),
442
- grad_pre_act,
443
- ctx.needs_input_grad[2],
444
- )
445
- else:
446
- grad_weight1 = None
447
- grad_bias1 = grad_pre_act if ctx.needs_input_grad[2] else None
448
- else:
449
- if ctx.needs_input_grad[1]:
450
- if process_group is not None and sequence_parallel and checkpoint_lvl != 2:
451
- handle_x.wait()
452
- grad_weight1 = F.linear(
453
- grad_pre_act.t(), total_x.reshape(batch_dim, total_x.shape[-1]).t()
454
- )
455
- else:
456
- grad_weight1 = None
457
- if process_group is not None and ctx.needs_input_grad[0]:
458
- handle_grad_input.wait()
459
- return (
460
- grad_input,
461
- grad_weight1,
462
- grad_bias1,
463
- grad_weight2,
464
- grad_bias2,
465
- None,
466
- None,
467
- None,
468
- None,
469
- None,
470
- None,
471
- None,
472
- )
473
-
474
-
475
- def fused_mlp_func(
476
- x: Tensor,
477
- weight1: Tensor,
478
- weight2: Tensor,
479
- bias1: Optional[Tensor] = None,
480
- bias2: Optional[Tensor] = None,
481
- activation: str = "gelu_approx",
482
- save_pre_act: bool = True,
483
- return_residual: bool = False,
484
- checkpoint_lvl: int = 0,
485
- heuristic: int = 0,
486
- process_group: Optional[ProcessGroup] = None,
487
- sequence_parallel: bool = True,
488
- ):
489
- assert activation in ["gelu_approx", "relu", "sqrelu"]
490
- dtype_eligible = x.dtype in [torch.float16, torch.bfloat16] or (
491
- x.dtype == torch.float32 and torch.is_autocast_enabled()
492
- )
493
- # If we save pre-activation, dimension must be divisible by 128 (relu) or 8 (gelu)
494
- dim_eligible = not save_pre_act or (x.shape[-1] % (128 if activation == "relu" else 8) == 0)
495
- if (
496
- x.is_cuda
497
- and weight1.is_cuda
498
- and weight2.is_cuda
499
- and (bias1 is None or bias1.is_cuda)
500
- and (bias2 is None or bias2.is_cuda)
501
- and dtype_eligible
502
- and dim_eligible
503
- ):
504
- return FusedMLPFunc.apply(
505
- x,
506
- weight1,
507
- bias1,
508
- weight2,
509
- bias2,
510
- activation,
511
- save_pre_act,
512
- return_residual,
513
- checkpoint_lvl,
514
- heuristic,
515
- process_group,
516
- sequence_parallel,
517
- )
518
- else:
519
- assert process_group is None
520
- pre_act = F.linear(x, weight1, bias1)
521
- activation_fn = (
522
- partial(F.gelu, approximate="tanh")
523
- if activation == "gelu_approx"
524
- else partial(F.relu, inplace=True)
525
- )
526
- output1 = activation_fn(pre_act)
527
- output2 = F.linear(output1, weight2, bias2)
528
- return output2 if not return_residual else (output2, x)
529
-
530
-
531
- class FusedMLP(nn.Module):
532
- def __init__(
533
- self,
534
- in_features,
535
- hidden_features=None,
536
- out_features=None,
537
- bias1=True,
538
- bias2=True,
539
- activation="gelu_approx",
540
- return_residual=False,
541
- checkpoint_lvl=0,
542
- heuristic="auto",
543
- device=None,
544
- dtype=None,
545
- ):
546
- """
547
- If process_group is not None, we're doing Tensor Parallel with sequence parallelism:
548
- we do an all_gather of x before doing the matmul, gelu, then matmul.
549
- Finally we do a reduce_scatter of the output.
550
-
551
- checkpoint_lvl (increasing lvl means slower but more memory saving):
552
- 0: no recomputation in the bwd
553
- 1: recompute gelu_out in the bwd
554
- 2: recompute pre_act and gelu_out in the bwd
555
- heuristic:
556
- -1: don't fuse gemm + gelu (separate kernel)
557
- 0..4: use this heuristic for the algo section in the fused gemm + gelu
558
- 'auto': heuristic will be picked automatically:
559
- For CUDA >= 11.8, we set heuristic=0 for both fp16 and bf16 for best perf.
560
- For CUDA <= 11.7, we set heuristic=1 for fp16 and heuristic=-1 for bf16.
561
- For H100, we set heuristic=-1 for both fp16 and bf16 as the fused cuBlasLt implementation
562
- is slower than the unfused version.
563
- return_residual: whether to return the input x along with the output. This is for
564
- performance reason: for post-norm architecture, returning the input allows us
565
- to fuse the backward of nn.Linear with the residual connection.
566
- """
567
- assert checkpoint_lvl in [0, 1, 2]
568
- assert activation in ["gelu_approx", "relu", "sqrelu"]
569
- factory_kwargs = {"device": device, "dtype": dtype}
570
- super().__init__()
571
- out_features = out_features or in_features
572
- hidden_features = hidden_features or in_features * 4
573
- self.activation = activation
574
- self.return_residual = return_residual
575
- self.checkpoint_lvl = checkpoint_lvl
576
- self.heuristic = heuristic if activation != "sqrelu" else -1
577
- self.fc1 = nn.Linear(in_features, hidden_features, bias=bias1, **factory_kwargs)
578
- self.fc2 = nn.Linear(hidden_features, out_features, bias=bias2, **factory_kwargs)
579
-
580
- def forward(self, x, process_group=None):
581
- dtype = x.dtype if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype()
582
- if self.heuristic == "auto":
583
- if self.activation == "gelu_approx":
584
- if torch.cuda.get_device_capability("cuda") == (9, 0):
585
- heuristic = -1
586
- else:
587
- cuda_ver = tuple(map(int, torch.version.cuda.split(".")))
588
- heuristic = 0 if cuda_ver >= (11, 8) else (1 if dtype == torch.float16 else -1)
589
- else:
590
- heuristic = 0
591
- else:
592
- heuristic = self.heuristic
593
- out = fused_mlp_func(
594
- x,
595
- self.fc1.weight,
596
- self.fc2.weight,
597
- self.fc1.bias,
598
- self.fc2.bias,
599
- activation=self.activation,
600
- save_pre_act=self.training,
601
- return_residual=self.return_residual,
602
- checkpoint_lvl=self.checkpoint_lvl,
603
- heuristic=heuristic,
604
- process_group=process_group,
605
- )
606
- if self.return_residual:
607
- out, x = out
608
- if process_group is not None:
609
- out = reduce_scatter(out, process_group)
610
- return out if not self.return_residual else (out, x)
611
-
612
-
613
- class ParallelFusedMLP(nn.Module):
614
- def __init__(
615
- self,
616
- in_features,
617
- hidden_features=None,
618
- out_features=None,
619
- activation="gelu_approx",
620
- process_group: ProcessGroup = None,
621
- bias1=True,
622
- bias2=True,
623
- sequence_parallel=True,
624
- checkpoint_lvl=0,
625
- heuristic="auto",
626
- device=None,
627
- dtype=None,
628
- ):
629
- """
630
- process_group is required. We're doing Tensor Parallel with sequence parallelism:
631
- we do an all_gather of x before doing the matmul, gelu, then matmul.
632
- Finally we do a reduce_scatter of the output.
633
-
634
- checkpoint_lvl (increasing lvl means slower but more memory saving):
635
- 0: no recomputation in the bwd
636
- 1: recompute gelu_out in the bwd
637
- 2: recompute pre_act and gelu_out in the bwd
638
- heuristic:
639
- -1: don't fuse gemm + gelu (separate kernel)
640
- 0..4: use this heuristic for the algo section in the fused gemm + gelu
641
- 'auto': heuristic will be picked automatically:
642
- For CUDA >= 11.8, we set heuristic=0 for both fp16 and bf16 for best perf.
643
- For CUDA <= 11.7, we set heuristic=1 for fp16 and heuristic=-1 for bf16.
644
- """
645
- assert checkpoint_lvl in [0, 1, 2]
646
- assert activation in ["gelu_approx", "relu", "sqrelu"]
647
- assert process_group is not None
648
- factory_kwargs = {"device": device, "dtype": dtype}
649
- super().__init__()
650
- out_features = out_features or in_features
651
- hidden_features = hidden_features or in_features * 4
652
- self.activation = activation
653
- self.process_group = process_group
654
- self.sequence_parallel = sequence_parallel
655
- self.checkpoint_lvl = checkpoint_lvl
656
- self.heuristic = heuristic if activation != "sqrelu" else -1
657
- self.fc1 = ColumnParallelLinear(
658
- in_features, hidden_features, process_group, bias=bias1, **factory_kwargs
659
- )
660
- self.fc2 = RowParallelLinear(
661
- hidden_features, out_features, process_group, bias=bias2, **factory_kwargs
662
- )
663
-
664
- def forward(self, x):
665
- dtype = x.dtype if not torch.is_autocast_enabled() else torch.get_autocast_gpu_dtype()
666
- if self.heuristic == "auto":
667
- if self.activation == "gelu_approx":
668
- cuda_ver = tuple(map(int, torch.version.cuda.split(".")))
669
- heuristic = 0 if cuda_ver >= (11, 8) else (1 if dtype == torch.float16 else -1)
670
- else:
671
- heuristic = 0
672
- else:
673
- heuristic = self.heuristic
674
- out = fused_mlp_func(
675
- x,
676
- self.fc1.weight,
677
- self.fc2.weight,
678
- self.fc1.bias,
679
- self.fc2.bias,
680
- activation=self.activation,
681
- save_pre_act=self.training,
682
- checkpoint_lvl=self.checkpoint_lvl,
683
- heuristic=heuristic,
684
- process_group=self.process_group,
685
- sequence_parallel=self.sequence_parallel,
686
- )
687
- reduce_fn = reduce_scatter if self.sequence_parallel else all_reduce
688
- return reduce_fn(out, self.process_group)
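
The fused_dense.py file deleted above routes to a cuBLASLt-backed extension only when the tensors are CUDA fp16/bf16 (or fp32 under autocast) and otherwise falls back to plain PyTorch ops. The sketch below reproduces just that unfused fallback of fused_mlp_func with the "gelu_approx" activation and made-up shapes, so it runs without the removed extension.

from functools import partial

import torch
import torch.nn.functional as F

x = torch.randn(2, 16, 512)
weight1, bias1 = torch.randn(2048, 512), torch.randn(2048)
weight2, bias2 = torch.randn(512, 2048), torch.randn(512)

# Mirror of the fallback branch in the deleted fused_mlp_func.
pre_act = F.linear(x, weight1, bias1)
activation_fn = partial(F.gelu, approximate="tanh")  # the "gelu_approx" activation
output1 = activation_fn(pre_act)
output2 = F.linear(output1, weight2, bias2)
print(output2.shape)  # torch.Size([2, 16, 512])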