Upload modeling_esm_plusplus.py with huggingface_hub
Browse files
modeling_esm_plusplus.py: +3 additions, -0 deletions
modeling_esm_plusplus.py
CHANGED
|
@@ -1002,6 +1002,7 @@ class ESMplusplusForSequenceClassification(ESMplusplusForMaskedLM, EmbeddingMixi
|
|
| 1002 |
logits=logits,
|
| 1003 |
last_hidden_state=x,
|
| 1004 |
hidden_states=output.hidden_states,
|
|
|
|
| 1005 |
)
|
| 1006 |
|
| 1007 |
|
|
@@ -1065,6 +1066,7 @@ class ESMplusplusForTokenClassification(ESMplusplusForMaskedLM, EmbeddingMixin):
|
|
| 1065 |
logits=logits,
|
| 1066 |
last_hidden_state=x,
|
| 1067 |
hidden_states=output.hidden_states,
|
|
|
|
| 1068 |
)
|
| 1069 |
|
| 1070 |
|
|
@@ -1161,6 +1163,7 @@ class EsmSequenceTokenizer(PreTrainedTokenizerFast):
|
|
| 1161 |
# sequences are merged if you want.
|
| 1162 |
tokenizer.post_processor = TemplateProcessing( # type: ignore
|
| 1163 |
single="<cls> $A <eos>",
|
|
|
|
| 1164 |
special_tokens=[
|
| 1165 |
("<cls>", tokenizer.token_to_id("<cls>")),
|
| 1166 |
("<eos>", tokenizer.token_to_id("<eos>")),
|
|
|
|
| 1002 |
logits=logits,
|
| 1003 |
last_hidden_state=x,
|
| 1004 |
hidden_states=output.hidden_states,
|
| 1005 | + attentions=output.attentions,
|
| 1006 |
)
|
| 1007 |
|
| 1008 |
|
|
|
|
| 1066 |
logits=logits,
|
| 1067 |
last_hidden_state=x,
|
| 1068 |
hidden_states=output.hidden_states,
|
| 1069 | + attentions=output.attentions,
|
| 1070 |
)
|
| 1071 |
|
| 1072 |
|
|
|
|
| 1163 |
# sequences are merged if you want.
|
| 1164 |
tokenizer.post_processor = TemplateProcessing( # type: ignore
|
| 1165 |
single="<cls> $A <eos>",
|
| 1166 | + pair="<cls>:0 $A:0 <eos>:0 $B:1 <eos>:1",
|
| 1167 |
special_tokens=[
|
| 1168 |
("<cls>", tokenizer.token_to_id("<cls>")),
|
| 1169 |
("<eos>", tokenizer.token_to_id("<eos>")),
|