Upload modeling_fastesm.py with huggingface_hub
Browse files — modeling_fastesm.py (+6 −2)
modeling_fastesm.py
CHANGED
|
@@ -903,6 +903,7 @@ class FastEsmModel(FastEsmPreTrainedModel, EmbeddingMixin):
|
|
| 903 |
output_attentions: Optional[bool] = None,
|
| 904 |
output_hidden_states: Optional[bool] = None,
|
| 905 |
return_dict: Optional[bool] = None, # to play nice with HF adjacent packages
|
|
|
|
| 906 |
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
|
| 907 |
"""Forward pass for base model.
|
| 908 |
|
|
@@ -979,6 +980,7 @@ class FastEsmForMaskedLM(FastEsmPreTrainedModel, EmbeddingMixin):
|
|
| 979 |
output_attentions: Optional[bool] = None,
|
| 980 |
output_hidden_states: Optional[bool] = None,
|
| 981 |
return_dict: Optional[bool] = None, # to play nice with HF adjacent packages
|
|
|
|
| 982 |
) -> Union[Tuple, EsmMaskedLMOutput]:
|
| 983 |
outputs = self.esm(
|
| 984 |
input_ids,
|
|
@@ -1032,7 +1034,8 @@ class FastEsmForSequenceClassification(FastEsmPreTrainedModel, EmbeddingMixin):
|
|
| 1032 |
labels: Optional[torch.Tensor] = None,
|
| 1033 |
output_attentions: Optional[bool] = None,
|
| 1034 |
output_hidden_states: Optional[bool] = None,
|
| 1035 | - return_dict: Optional[bool] = None
|
|
|
|
| 1036 |
) -> Union[Tuple, SequenceClassifierOutput]:
|
| 1037 |
outputs = self.esm(
|
| 1038 |
input_ids,
|
|
@@ -1099,7 +1102,8 @@ class FastEsmForTokenClassification(FastEsmPreTrainedModel, EmbeddingMixin):
|
|
| 1099 |
labels: Optional[torch.Tensor] = None,
|
| 1100 |
output_attentions: Optional[bool] = None,
|
| 1101 |
output_hidden_states: Optional[bool] = None,
|
| 1102 | - return_dict: Optional[bool] = None
|
|
|
|
| 1103 |
) -> Union[Tuple, TokenClassifierOutput]:
|
| 1104 |
outputs = self.esm(
|
| 1105 |
input_ids,
|
|
|
|
| 903 |
output_attentions: Optional[bool] = None,
|
| 904 |
output_hidden_states: Optional[bool] = None,
|
| 905 |
return_dict: Optional[bool] = None, # to play nice with HF adjacent packages
|
| 906 | + **kwargs,
|
| 907 |
) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
|
| 908 |
"""Forward pass for base model.
|
| 909 |
|
|
|
|
| 980 |
output_attentions: Optional[bool] = None,
|
| 981 |
output_hidden_states: Optional[bool] = None,
|
| 982 |
return_dict: Optional[bool] = None, # to play nice with HF adjacent packages
|
| 983 | + **kwargs,
|
| 984 |
) -> Union[Tuple, EsmMaskedLMOutput]:
|
| 985 |
outputs = self.esm(
|
| 986 |
input_ids,
|
|
|
|
| 1034 |
labels: Optional[torch.Tensor] = None,
|
| 1035 |
output_attentions: Optional[bool] = None,
|
| 1036 |
output_hidden_states: Optional[bool] = None,
|
| 1037 | + return_dict: Optional[bool] = None,
|
| 1038 | + **kwargs,
|
| 1039 |
) -> Union[Tuple, SequenceClassifierOutput]:
|
| 1040 |
outputs = self.esm(
|
| 1041 |
input_ids,
|
|
|
|
| 1102 |
labels: Optional[torch.Tensor] = None,
|
| 1103 |
output_attentions: Optional[bool] = None,
|
| 1104 |
output_hidden_states: Optional[bool] = None,
|
| 1105 | + return_dict: Optional[bool] = None,
|
| 1106 | + **kwargs,
|
| 1107 |
) -> Union[Tuple, TokenClassifierOutput]:
|
| 1108 |
outputs = self.esm(
|
| 1109 |
input_ids,
|