Crystalcareai committed
Commit d3e1600 · verified · 1 Parent(s): c6e50cc

Update modeling_quiet.py

Files changed (1):
  1. modeling_quiet.py +57 -57
modeling_quiet.py CHANGED
@@ -36,7 +36,7 @@ from transformers import TextStreamer, AutoTokenizer
 
 from transformers.activations import ACT2FN
 from transformers.cache_utils import Cache, DynamicCache
-from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask
+from transformers.modeling_attn_mask_utils import _prepare_4d_causal_attention_mask, _prepare_4d_causal_attention_mask_for_sdpa
 from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, SequenceClassifierOutputWithPast
 from transformers.modeling_utils import PreTrainedModel
 from transformers.utils import (
@@ -65,62 +65,62 @@ logger = logging.get_logger(__name__)
 _CONFIG_FOR_DOC = "QuietConfig"
 
 
-def _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, inputs_embeds, past_key_values_length):
-    # Compute the attention mask correctly
-    bsz, tgt_len = input_shape
-
-    # Create a 4D attention mask from a 2D tensor mask.
-    # The shape of the output attention mask is (batch_size, 1, tgt_len, src_len)
-    # The values are either 0 or 1, where 0 means padding and 1 means non-padding.
-    combined_attention_mask = None
-    if attention_mask is not None:
-        # What if attention_mask is not None and has a shape of (batch_size, 1, tgt_len, src_len)
-        # In this case, we can just use it directly.
-        if attention_mask.dim() == 4:
-            combined_attention_mask = attention_mask
-        # What if attention_mask is not None and has a shape of (batch_size, 1, tgt_len)
-        # In this case, we need to expand it to (batch_size, 1, tgt_len, src_len)
-        elif attention_mask.dim() == 3:
-            expanded_attn_mask = attention_mask[:, None, :, :]
-            combined_attention_mask = expanded_attn_mask
-        # What if attention_mask is not None and has a shape of (batch_size, tgt_len)
-        # In this case, we need to expand it to (batch_size, 1, tgt_len, src_len)
-        elif attention_mask.dim() == 2:
-            # Provided a padding mask of dimensions [batch_size, seq_length]
-            # - if the model is a decoder, apply a causal mask in addition to the padding mask
-            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
-            if past_key_values_length > 0:
-                attention_mask = attention_mask.to(dtype=torch.long)
-                attention_mask = attention_mask[:, past_key_values_length:]
-            expanded_attn_mask = attention_mask[:, None, None, :]
-            combined_attention_mask = expanded_attn_mask
-        else:
-            raise ValueError(
-                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
-                    input_shape, attention_mask.shape
-                )
-            )
-
-    # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
-    # masked positions, this operation will create a tensor which is 0.0 for
-    # positions we want to attend and -10000.0 for masked positions.
-    # Since we are adding it to the raw scores before the softmax, this is
-    # effectively the same as removing these entirely.
-    if combined_attention_mask is not None:
-        # Ensure the attention mask values are within a reasonable range
-        combined_attention_mask = combined_attention_mask.clamp(min=0, max=1)
-
-        # Convert the attention mask to bfloat16
-        combined_attention_mask = combined_attention_mask.to(torch.bfloat16)
-
-        # Normalize the attention mask values to be between 0 and 1
-        combined_attention_mask = (1.0 - combined_attention_mask) * -10000.0
-    else:
-        combined_attention_mask = torch.zeros(
-            (bsz, 1, tgt_len, tgt_len), dtype=torch.bfloat16, device=inputs_embeds.device
-        )
-
-    return combined_attention_mask
+# def _prepare_4d_causal_attention_mask_for_sdpa(attention_mask, input_shape, inputs_embeds, past_key_values_length):
+#     # Compute the attention mask correctly
+#     bsz, tgt_len = input_shape
+
+#     # Create a 4D attention mask from a 2D tensor mask.
+#     # The shape of the output attention mask is (batch_size, 1, tgt_len, src_len)
+#     # The values are either 0 or 1, where 0 means padding and 1 means non-padding.
+#     combined_attention_mask = None
+#     if attention_mask is not None:
+#         # What if attention_mask is not None and has a shape of (batch_size, 1, tgt_len, src_len)
+#         # In this case, we can just use it directly.
+#         if attention_mask.dim() == 4:
+#             combined_attention_mask = attention_mask
+#         # What if attention_mask is not None and has a shape of (batch_size, 1, tgt_len)
+#         # In this case, we need to expand it to (batch_size, 1, tgt_len, src_len)
+#         elif attention_mask.dim() == 3:
+#             expanded_attn_mask = attention_mask[:, None, :, :]
+#             combined_attention_mask = expanded_attn_mask
+#         # What if attention_mask is not None and has a shape of (batch_size, tgt_len)
+#         # In this case, we need to expand it to (batch_size, 1, tgt_len, src_len)
+#         elif attention_mask.dim() == 2:
+#             # Provided a padding mask of dimensions [batch_size, seq_length]
+#             # - if the model is a decoder, apply a causal mask in addition to the padding mask
+#             # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
+#             if past_key_values_length > 0:
+#                 attention_mask = attention_mask.to(dtype=torch.long)
+#                 attention_mask = attention_mask[:, past_key_values_length:]
+#             expanded_attn_mask = attention_mask[:, None, None, :]
+#             combined_attention_mask = expanded_attn_mask
+#         else:
+#             raise ValueError(
+#                 "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
+#                     input_shape, attention_mask.shape
+#                 )
+#             )
+
+#     # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
+#     # masked positions, this operation will create a tensor which is 0.0 for
+#     # positions we want to attend and -10000.0 for masked positions.
+#     # Since we are adding it to the raw scores before the softmax, this is
+#     # effectively the same as removing these entirely.
+#     if combined_attention_mask is not None:
+#         # Ensure the attention mask values are within a reasonable range
+#         combined_attention_mask = combined_attention_mask.clamp(min=0, max=1)
+
+#         # Convert the attention mask to bfloat16
+#         combined_attention_mask = combined_attention_mask.to(torch.bfloat16)
+
+#         # Normalize the attention mask values to be between 0 and 1
+#         combined_attention_mask = (1.0 - combined_attention_mask) * -10000.0
+#     else:
+#         combined_attention_mask = torch.zeros(
+#             (bsz, 1, tgt_len, tgt_len), dtype=torch.bfloat16, device=inputs_embeds.device
+#         )
+
+#     return combined_attention_mask
 
 
 # Copied from transformers.models.llama.modeling_llama._get_unpad_data
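For context, the commit comments out the hand-rolled SDPA mask helper and instead imports both mask builders from transformers.modeling_attn_mask_utils. The sketch below is not part of the commit; it is a minimal, standalone illustration of how those two private helpers are typically called, assuming a transformers release from the 4.36-4.39 era where they exist with this signature (they may be renamed or removed in later versions), with purely illustrative tensor shapes.

import torch
from transformers.modeling_attn_mask_utils import (
    _prepare_4d_causal_attention_mask,
    _prepare_4d_causal_attention_mask_for_sdpa,
)

# Illustrative shapes only.
bsz, seq_len, hidden_size = 2, 8, 32
past_key_values_length = 0

# 2D padding mask (1 = attend, 0 = padding) and dummy embeddings; the helpers
# read dtype/device information from inputs_embeds.
attention_mask = torch.ones(bsz, seq_len, dtype=torch.long)
inputs_embeds = torch.zeros(bsz, seq_len, hidden_size)

# Eager-attention path: returns an additive float mask of shape
# (bsz, 1, seq_len, seq_len + past_key_values_length), with large negative
# values at masked positions.
eager_mask = _prepare_4d_causal_attention_mask(
    attention_mask, (bsz, seq_len), inputs_embeds, past_key_values_length
)

# SDPA path: same call signature, but it may return None when the mask is
# trivial (e.g. no padding), letting scaled_dot_product_attention take its
# built-in causal fast path instead of materializing a 4D mask.
sdpa_mask = _prepare_4d_causal_attention_mask_for_sdpa(
    attention_mask, (bsz, seq_len), inputs_embeds, past_key_values_length
)

print(eager_mask.shape, sdpa_mask)

Relying on the library implementations keeps the mask semantics (additive float mask for eager attention, optional None for SDPA) consistent with the rest of transformers, which is presumably why the local copy above was retired rather than patched.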