alvarodt committed
Commit a1fce5f · 1 Parent(s): b8311bd

Upload tokenizer

merges.txt ADDED (diff too large to render)
 
special_tokens_map.json CHANGED
@@ -1,7 +1,5 @@
 {
-  "cls_token": "[CLS]",
-  "mask_token": "[MASK]",
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
-  "unk_token": "[UNK]"
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
 }
tokenizer.json ADDED (diff too large to render)
 
tokenizer_config.json CHANGED
@@ -1,16 +1,10 @@
 {
-  "cls_token": "[CLS]",
-  "do_basic_tokenize": true,
-  "do_lower_case": true,
-  "mask_token": "[MASK]",
-  "model_max_length": 512,
-  "name_or_path": "distilbert-base-uncased",
-  "never_split": null,
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
+  "add_prefix_space": false,
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "model_max_length": 1024,
+  "name_or_path": "gpt2",
   "special_tokens_map_file": null,
-  "strip_accents": null,
-  "tokenize_chinese_chars": true,
-  "tokenizer_class": "DistilBertTokenizer",
-  "unk_token": "[UNK]"
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
 }
vocab.json ADDED (diff too large to render)
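Together, the added vocab.json (a token-to-id map) and merges.txt (BPE merge rules in priority order) fully define GPT-2's byte-level BPE, while tokenizer.json bundles the same data in the single-file format the fast tokenizer consumes. A sketch of building the slow tokenizer straight from the two raw files; the file names are as committed, and the vocabulary size is assumed to match standard GPT-2:

from transformers import GPT2Tokenizer

# vocab_file and merges_file are the two files added in this commit.
tok = GPT2Tokenizer("vocab.json", "merges.txt")
print(len(tok))  # 50257 for the standard GPT-2 vocabulary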