Hiveurban committed
Commit f030f41 · verified · 1 Parent(s): c8c8b02

Upload BertForSyntaxParsing.py with huggingface_hub

Files changed (1)
  1. BertForSyntaxParsing.py +312 -0
BertForSyntaxParsing.py ADDED
@@ -0,0 +1,312 @@
+ import math
+ from transformers.utils import ModelOutput
+ import torch
+ from torch import nn
+ from typing import Dict, List, Tuple, Optional, Union
+ from dataclasses import dataclass
+ from transformers import BertPreTrainedModel, BertModel, BertTokenizerFast
+
+ ALL_FUNCTION_LABELS = ["nsubj", "nsubj:cop", "punct", "mark", "mark:q", "case", "case:gen", "case:acc", "fixed", "obl", "det", "amod", "acl:relcl", "nmod", "cc", "conj", "root", "compound:smixut", "cop", "compound:affix", "advmod", "nummod", "appos", "nsubj:pass", "nmod:poss", "xcomp", "obj", "aux", "parataxis", "advcl", "ccomp", "csubj", "acl", "obl:tmod", "csubj:pass", "dep", "dislocated", "nmod:tmod", "nmod:npmod", "flat", "obl:npmod", "goeswith", "reparandum", "orphan", "list", "discourse", "iobj", "vocative", "expl", "flat:name"]
+
+ @dataclass
+ class SyntaxLogitsOutput(ModelOutput):
+     dependency_logits: torch.FloatTensor = None
+     function_logits: torch.FloatTensor = None
+     dependency_head_indices: torch.LongTensor = None
+
+     def detach(self):
+         return SyntaxLogitsOutput(self.dependency_logits.detach(), self.function_logits.detach(), self.dependency_head_indices.detach())
+
+ @dataclass
+ class SyntaxTaggingOutput(ModelOutput):
+     loss: Optional[torch.FloatTensor] = None
+     logits: Optional[SyntaxLogitsOutput] = None
+     hidden_states: Optional[Tuple[torch.FloatTensor]] = None
+     attentions: Optional[Tuple[torch.FloatTensor]] = None
+
+ @dataclass
+ class SyntaxLabels(ModelOutput):
+     dependency_labels: Optional[torch.LongTensor] = None
+     function_labels: Optional[torch.LongTensor] = None
+
+     def detach(self):
+         return SyntaxLabels(self.dependency_labels.detach(), self.function_labels.detach())
+
+     def to(self, device):
+         return SyntaxLabels(self.dependency_labels.to(device), self.function_labels.to(device))
+
+ class BertSyntaxParsingHead(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+
+         # the attention query & key values
+         self.head_size = config.syntax_head_size  # int(config.hidden_size / config.num_attention_heads * 2)
+         self.query = nn.Linear(config.hidden_size, self.head_size)
+         self.key = nn.Linear(config.hidden_size, self.head_size)
+         # the function classifier gets two encoding values and predicts the labels
+         self.num_function_classes = len(ALL_FUNCTION_LABELS)
+         self.cls = nn.Linear(config.hidden_size * 2, self.num_function_classes)
+
+     def forward(
+             self,
+             hidden_states: torch.Tensor,
+             extended_attention_mask: Optional[torch.Tensor],
+             labels: Optional[SyntaxLabels] = None,
+             compute_mst: bool = False) -> Tuple[torch.Tensor, SyntaxLogitsOutput]:
+
+         # Take the dot product between "query" and "key" to get the raw attention scores.
+         query_layer = self.query(hidden_states)
+         key_layer = self.key(hidden_states)
+         attention_scores = torch.bmm(query_layer, key_layer.transpose(-1, -2)) / math.sqrt(self.head_size)
+
+         # add in the attention mask
+         if extended_attention_mask is not None:
+             if extended_attention_mask.ndim == 4:
+                 extended_attention_mask = extended_attention_mask.squeeze(1)
+             attention_scores += extended_attention_mask  # batch x seq x seq
+
+         # At this point, take the hidden state of each word and of its dependency head, and predict the function label.
+         # If labels are provided, use them.
+         if self.training and labels is not None:
+             # Note that the labels can contain -100, so clamp those to zero
+             dep_indices = labels.dependency_labels.clamp_min(0)
+         # Otherwise - check whether the caller wants the MST or just the argmax
+         elif compute_mst:
+             dep_indices = compute_mst_tree(attention_scores, extended_attention_mask)
+         else:
+             dep_indices = torch.argmax(attention_scores, dim=-1)
+
+         # After retrieving the dependency indices, create a tensor of the batch indices and retrieve the head vectors to calculate the function label
+         batch_indices = torch.arange(dep_indices.size(0)).view(-1, 1).expand(-1, dep_indices.size(1)).to(dep_indices.device)
+         dep_vectors = hidden_states[batch_indices, dep_indices, :]  # batch x seq x dim
+
+         # concatenate them with the last hidden states and send them to the classifier
+         cls_inputs = torch.cat((hidden_states, dep_vectors), dim=-1)
+         function_logits = self.cls(cls_inputs)
+
+         loss = None
+         if labels is not None:
+             loss_fct = nn.CrossEntropyLoss()
+             # step 1: dependency loss - this is applied to the attention scores
+             loss = loss_fct(attention_scores.view(-1, hidden_states.size(-2)), labels.dependency_labels.view(-1))
+             # step 2: function label loss
+             loss += loss_fct(function_logits.view(-1, self.num_function_classes), labels.function_labels.view(-1))
+
+         return (loss, SyntaxLogitsOutput(attention_scores, function_logits, dep_indices))
+
+
+ class BertForSyntaxParsing(BertPreTrainedModel):
+
+     def __init__(self, config):
+         super().__init__(config)
+
+         self.bert = BertModel(config, add_pooling_layer=False)
+         self.dropout = nn.Dropout(config.hidden_dropout_prob)
+         self.syntax = BertSyntaxParsingHead(config)
+
+         # Initialize weights and apply final processing
+         self.post_init()
+
+     def forward(
+         self,
+         input_ids: Optional[torch.Tensor] = None,
+         attention_mask: Optional[torch.Tensor] = None,
+         token_type_ids: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.Tensor] = None,
+         labels: Optional[SyntaxLabels] = None,
+         head_mask: Optional[torch.Tensor] = None,
+         inputs_embeds: Optional[torch.Tensor] = None,
+         output_attentions: Optional[bool] = None,
+         output_hidden_states: Optional[bool] = None,
+         return_dict: Optional[bool] = None,
+         compute_syntax_mst: Optional[bool] = None,
+     ):
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         bert_outputs = self.bert(
+             input_ids,
+             attention_mask=attention_mask,
+             token_type_ids=token_type_ids,
+             position_ids=position_ids,
+             head_mask=head_mask,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             output_hidden_states=output_hidden_states,
+             return_dict=return_dict,
+         )
+
+         extended_attention_mask = None
+         if attention_mask is not None:
+             extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_ids.size())
+         # apply the syntax head
+         loss, logits = self.syntax(self.dropout(bert_outputs[0]), extended_attention_mask, labels, compute_syntax_mst)
+
+         if not return_dict:
+             return (loss, (logits.dependency_logits, logits.function_logits)) + bert_outputs[2:]
+
+         return SyntaxTaggingOutput(
+             loss=loss,
+             logits=logits,
+             hidden_states=bert_outputs.hidden_states,
+             attentions=bert_outputs.attentions,
+         )
+
+     def predict(self, sentences: Union[str, List[str]], tokenizer: BertTokenizerFast, compute_mst=True):
+         if isinstance(sentences, str):
+             sentences = [sentences]
+
+         # predict the logits for the sentences
+         inputs = tokenizer(sentences, padding='longest', truncation=True, return_tensors='pt')
+         inputs = {k: v.to(self.device) for k, v in inputs.items()}
+         logits = self.forward(**inputs, return_dict=True, compute_syntax_mst=compute_mst).logits
+         return parse_logits(inputs['input_ids'].tolist(), sentences, tokenizer, logits)
+
+ def parse_logits(input_ids: List[List[int]], sentences: List[str], tokenizer: BertTokenizerFast, logits: SyntaxLogitsOutput):
+     outputs = []
+
+     special_toks = tokenizer.all_special_tokens
+     for i in range(len(sentences)):
+         deps = logits.dependency_head_indices[i].tolist()
+         funcs = logits.function_logits.argmax(-1)[i].tolist()
+         toks = [tok for tok in tokenizer.convert_ids_to_tokens(input_ids[i]) if tok not in special_toks]
+
+         # first, go through the tokens and create a mapping between each dependency index and the corresponding index
+         # without wordpieces. At the same time, merge the wordpieces back into whole words.
+         idx_mapping = {-1: -1}  # default root
+         real_idx = -1
+         for i in range(len(toks)):
+             if not toks[i].startswith('##'):
+                 real_idx += 1
+             idx_mapping[i] = real_idx
+
+         # build our tree, keeping track of the root idx
+         tree = []
+         root_idx = 0
+         for i in range(len(toks)):
+             if toks[i].startswith('##'):
+                 tree[-1]['word'] += toks[i][2:]
+                 continue
+
+             dep_idx = deps[i + 1] - 1  # add 1 to skip the CLS token in the predictions, then subtract 1 to drop it from the head index
+             if dep_idx == len(toks): dep_idx = i - 1  # if the model predicts SEP, just point to the previous word
+
+             dep_head = 'root' if dep_idx == -1 else toks[dep_idx]
+             dep_func = ALL_FUNCTION_LABELS[funcs[i + 1]]
+
+             if dep_head == 'root': root_idx = len(tree)
+             tree.append(dict(word=toks[i], dep_head_idx=idx_mapping[dep_idx], dep_func=dep_func))
+         # add the head word to each entry
+         for d in tree:
+             d['dep_head'] = tree[d['dep_head_idx']]['word']
+
+         outputs.append(dict(tree=tree, root_idx=root_idx))
+     return outputs
+
+
+ def compute_mst_tree(attention_scores: torch.Tensor, extended_attention_mask: torch.LongTensor):
+     # attention scores should be 3 dimensions - batch x seq x seq (if it is 2 - just unsqueeze)
+     if attention_scores.ndim == 2: attention_scores = attention_scores.unsqueeze(0)
+     if attention_scores.ndim != 3 or attention_scores.shape[1] != attention_scores.shape[2]:
+         raise ValueError(f'Expected attention scores to be of shape batch x seq x seq, instead got {attention_scores.shape}')
+
+     batch_size, seq_len, _ = attention_scores.shape
+     # start by softmaxing so the scores are comparable
+     attention_scores = attention_scores.softmax(dim=-1)
+
+     batch_indices = torch.arange(batch_size, device=attention_scores.device)
+     seq_indices = torch.arange(seq_len, device=attention_scores.device)
+
+     seq_lens = torch.full((batch_size,), seq_len)
+
+     if extended_attention_mask is not None:
+         seq_lens = torch.argmax((extended_attention_mask != 0).int(), dim=2).squeeze(1)
+         # zero out any padding
+         attention_scores[extended_attention_mask.squeeze(1) != 0] = 0
+
+     # zero out the scores for CLS and SEP, so they never get chosen as a replacement arc
+     attention_scores[:, 0, :] = 0
+     attention_scores[batch_indices, seq_lens - 1, :] = 0
+     attention_scores[batch_indices, :, seq_lens - 1] = 0  # can never predict SEP
+     # set the score of each token pointing to itself to 0
+     attention_scores[:, seq_indices, seq_indices] = 0
+
+     # find the root and give it the highest score, so we never have a conflict
+     root_cands = torch.argsort(attention_scores[:, :, 0], dim=-1)
+     attention_scores[batch_indices.unsqueeze(1), root_cands, 0] = 0
+     attention_scores[batch_indices, root_cands[:, -1], 0] = 1.0
+
+     # we start by taking the argmax for each token, and then detecting the cycles and contracting them
+     sorted_indices = torch.argsort(attention_scores, dim=-1, descending=True)
+     indices = sorted_indices[:, :, 0].clone()  # take the argmax
+
+     attention_scores = attention_scores.tolist()
+     seq_lens = seq_lens.tolist()
+     sorted_indices = [[sub_l[:slen] for sub_l in l[:slen]] for l, slen in zip(sorted_indices.tolist(), seq_lens)]
+
+     # go through each batch item and make sure our tree works
+     for batch_idx in range(batch_size):
+         # We have one root - detect the cycles and contract them. A cycle can never contain the root, so
+         # for every cycle we look at all the nodes and find the highest-scoring arc out of the cycle, and use it as the replacement.
+         has_cycle, cycle_nodes = detect_cycle(indices[batch_idx], seq_lens[batch_idx])
+         contracted_arcs = set()
+         while has_cycle:
+             base_idx, head_idx = choose_contracting_arc(indices[batch_idx], sorted_indices[batch_idx], cycle_nodes, contracted_arcs, seq_lens[batch_idx], attention_scores[batch_idx])
+             indices[batch_idx, base_idx] = head_idx
+             contracted_arcs.add(base_idx)
+             # find the next cycle
+             has_cycle, cycle_nodes = detect_cycle(indices[batch_idx], seq_lens[batch_idx])
+
+     return indices
+
+ def detect_cycle(indices: torch.LongTensor, seq_len: int):
+     # Simple cycle detection algorithm
+     # Returns a boolean indicating if a cycle is detected and the nodes involved in the cycle
+     visited = set()
+     for node in range(1, seq_len - 1):  # ignore the CLS/SEP tokens
+         if node in visited:
+             continue
+         current_path = set()
+         while node not in visited:
+             visited.add(node)
+             current_path.add(node)
+             node = indices[node].item()
+             if node == 0: break  # roots never point to anything
+         if node in current_path:
+             return True, current_path  # Cycle detected
+     return False, None
+
+ def choose_contracting_arc(indices: torch.LongTensor, sorted_indices: List[List[int]], cycle_nodes: set, contracted_arcs: set, seq_len: int, scores: List[List[float]]):
+     # Chooses the highest-scoring, non-cycling arc from the graph. Iterates through 'cycle_nodes' to find
+     # the best arc based on 'scores', avoiding cycles and connections to node zero.
+     # For each node, we only look at the next-highest-scoring non-cycling arc.
+     best_base_idx, best_head_idx = -1, -1
+     score = 0
+
+     # convert the indices to a list once, to avoid multiple conversions (saves a few seconds)
+     currents = indices.tolist()
+     for base_node in cycle_nodes:
+         if base_node in contracted_arcs: continue
+         # we don't want to take anything that has a higher score than the current value - we can end up in an endless loop
+         # Since the indices are sorted, as soon as we find our current item, we can move on to the next.
+         current = currents[base_node]
+         found_current = False
+
+         for head_node in sorted_indices[base_node]:
+             if head_node == current:
+                 found_current = True
+                 continue
+             if head_node in contracted_arcs: continue
+             if not found_current or head_node in cycle_nodes or head_node == 0:
+                 continue
+
+             current_score = scores[base_node][head_node]
+             if current_score > score:
+                 best_base_idx, best_head_idx, score = base_node, head_node, current_score
+             break
+
+     if best_base_idx == -1:
+         raise ValueError('Stuck in endless loop trying to compute syntax mst. Please try again setting compute_syntax_mst=False')
+
+     return best_base_idx, best_head_idx
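
For reference, a minimal usage sketch of the predict() API defined in the uploaded file. This is not part of the commit: the repository ID below is a placeholder, and it assumes a checkpoint that ships this module (loaded with trust_remote_code=True) and defines config.syntax_head_size.

# Minimal usage sketch, with hypothetical names.
# "your-org/your-bert-syntax-model" is a placeholder for a checkpoint that bundles BertForSyntaxParsing.
from transformers import AutoModel, AutoTokenizer

repo_id = 'your-org/your-bert-syntax-model'  # placeholder, not a real checkpoint
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)
model.eval()

# predict() accepts a single string or a list of strings and returns one dict per sentence,
# each containing a 'tree' (word, dep_head, dep_head_idx, dep_func per word) and a 'root_idx'.
parsed = model.predict('The quick brown fox jumps over the lazy dog.', tokenizer, compute_mst=True)
for entry in parsed[0]['tree']:
    print(entry['word'], '->', entry['dep_head'], entry['dep_func'])

With compute_mst=True, head selection goes through compute_mst_tree above; with compute_mst=False, the per-token argmax is used instead, which is faster but may not produce a well-formed tree.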