Tom Aarsen committed
Commit bab4bc7 · Parent: bd684eb

Revert inadvertent config, tokenizer updates

This reverts commit a0b85cc42635c38e7064d3bf17e6085e964849cf.

Files changed (3)
  1. config.json +2 -6
  2. special_tokens_map.json +6 -42
  3. tokenizer.json +0 -0
config.json CHANGED
@@ -1,4 +1,5 @@
 {
+  "_name_or_path": "microsoft/deberta-v3-large",
   "architectures": [
     "DebertaV2ForSequenceClassification"
   ],
@@ -19,7 +20,6 @@
     "neutral": 2
   },
   "layer_norm_eps": 1e-07,
-  "legacy": true,
   "max_position_embeddings": 512,
   "max_relative_positions": -1,
   "model_type": "deberta-v2",
@@ -37,13 +37,9 @@
   "position_biased_input": false,
   "position_buckets": 256,
   "relative_attention": true,
-  "sentence_transformers": {
-    "activation_fn": "torch.nn.modules.linear.Identity",
-    "version": "4.1.0.dev0"
-  },
   "share_att_key": true,
   "torch_dtype": "float32",
-  "transformers_version": "4.52.0.dev0",
+  "transformers_version": "4.11.3",
   "type_vocab_size": 0,
   "vocab_size": 128100
 }
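
The net effect on config.json: the `legacy` flag, the `sentence_transformers` export block, and the `transformers_version` bump to 4.52.0.dev0 are dropped, while `_name_or_path` and the original `transformers_version` of 4.11.3 come back. A minimal sketch to sanity-check the reverted file, assuming a local copy of config.json from this revision:

```python
import json

# Assumed local path to the reverted config.json from this commit.
with open("config.json") as f:
    cfg = json.load(f)

# Keys introduced by the newer export should be gone after the revert.
assert "legacy" not in cfg
assert "sentence_transformers" not in cfg

# Fields restored by the revert.
assert cfg["_name_or_path"] == "microsoft/deberta-v3-large"
assert cfg["transformers_version"] == "4.11.3"
assert cfg["model_type"] == "deberta-v2"
```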
special_tokens_map.json CHANGED
@@ -1,46 +1,10 @@
 {
-  "bos_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "cls_token": {
-    "content": "[CLS]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "mask_token": {
-    "content": "[MASK]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "[PAD]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "sep_token": {
-    "content": "[SEP]",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
+  "bos_token": "[CLS]",
+  "cls_token": "[CLS]",
+  "eos_token": "[SEP]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
   "unk_token": {
     "content": "[UNK]",
     "lstrip": false,
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
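
Because the tokenizer.json diff is too large to render inline, one way to inspect the file at this revision is to fetch it directly. A sketch using huggingface_hub, where the repo_id is a placeholder for the actual model repository:

```python
from huggingface_hub import hf_hub_download

# repo_id is an assumption; substitute the repository this commit belongs to.
path = hf_hub_download(
    repo_id="your-org/your-model",
    filename="tokenizer.json",
    revision="bab4bc7",  # the commit shown above; the full 40-character hash also works
)
print(path)
```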