MohametSena committed
Commit e440ecf
1 Parent(s): bedf728

Training in progress, step 300

config.json CHANGED
@@ -6,7 +6,7 @@
     "AutoConfig": "index.TimesheetEstimatorConfig",
     "AutoModel": "index.TimesheetEstimator"
   },
-  "encoder_model_name": "distilroberta-base",
+  "encoder_model_name": "bert-base-multilingual-cased",
   "hidden_size": 768,
   "id2label": {
     "0": "LABEL_0"
@@ -15,5 +15,5 @@
     "LABEL_0": 0
   },
   "torch_dtype": "float32",
-  "transformers_version": "4.27.0.dev0"
+  "transformers_version": "4.29.1"
 }
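
Note: the auto_map entries bind AutoConfig and AutoModel to the custom classes in index.py, so this checkpoint has to be loaded with trust_remote_code=True. A minimal loading sketch; the repo id below is a placeholder, since the commit does not name the repository:

from transformers import AutoConfig, AutoModel

repo_id = "MohametSena/timesheet-estimator"  # hypothetical repo id, not given in this commit

# auto_map routes these calls to index.TimesheetEstimatorConfig and
# index.TimesheetEstimator, which requires trusting the repo's code.
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)

# This commit swaps the encoder backbone:
print(config.encoder_model_name)  # "bert-base-multilingual-cased" (was "distilroberta-base")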
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:3c046694e0f0f1459f47ff3b7dc33ab49a07dd2189ed0debd87fafd1904796d1
-size 328515381
+oid sha256:abd696b92220e36df10a76f4ff21fb419e0b3720cf7a35699bcf0f22ff99e5ab
+size 711487413
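
The jump from 328,515,381 to 711,487,413 bytes matches the backbone swap: in float32, bert-base-multilingual-cased (~178M parameters) is a little over twice the size of distilroberta-base (~82M). A sketch for verifying a downloaded checkpoint against the LFS pointer's sha256 oid (the local path is an assumption):

import hashlib

expected = "abd696b92220e36df10a76f4ff21fb419e0b3720cf7a35699bcf0f22ff99e5ab"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:            # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected, "file does not match the LFS pointer"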
special_tokens_map.json CHANGED
@@ -1,15 +1,7 @@
 {
-  "bos_token": "<s>",
-  "cls_token": "<s>",
-  "eos_token": "</s>",
-  "mask_token": {
-    "content": "<mask>",
-    "lstrip": true,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": "<pad>",
-  "sep_token": "</s>",
-  "unk_token": "<unk>"
+  "cls_token": "[CLS]",
+  "mask_token": "[MASK]",
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "unk_token": "[UNK]"
 }
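
The special tokens move from RoBERTa's <s>/</s>/<mask> set to BERT's [CLS]/[SEP]/[MASK] set, and bos_token/eos_token disappear because BertTokenizer does not define them. A quick check, reusing the placeholder repo id from above:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("MohametSena/timesheet-estimator")  # hypothetical repo id

print(tok.cls_token, tok.sep_token, tok.pad_token)  # [CLS] [SEP] [PAD]
print(tok.mask_token, tok.unk_token)                # [MASK] [UNK]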
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,15 +1,13 @@
 {
-  "add_prefix_space": false,
-  "bos_token": "<s>",
-  "cls_token": "<s>",
-  "eos_token": "</s>",
-  "errors": "replace",
-  "mask_token": "<mask>",
+  "clean_up_tokenization_spaces": true,
+  "cls_token": "[CLS]",
+  "do_lower_case": false,
+  "mask_token": "[MASK]",
   "model_max_length": 512,
-  "pad_token": "<pad>",
-  "sep_token": "</s>",
-  "special_tokens_map_file": null,
-  "tokenizer_class": "RobertaTokenizer",
-  "trim_offsets": true,
-  "unk_token": "<unk>"
+  "pad_token": "[PAD]",
+  "sep_token": "[SEP]",
+  "strip_accents": null,
+  "tokenize_chinese_chars": true,
+  "tokenizer_class": "BertTokenizer",
+  "unk_token": "[UNK]"
 }
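
The tokenizer class changes from RobertaTokenizer (byte-level BPE) to BertTokenizer (WordPiece), cased (do_lower_case: false) and without accent stripping, which suits the multilingual backbone. A sketch of what encoded inputs now look like, again with the placeholder repo id; the exact subword split shown is illustrative:

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("MohametSena/timesheet-estimator")  # hypothetical repo id

enc = tok("8 heures de développement", truncation=True, max_length=512)
print(tok.convert_ids_to_tokens(enc["input_ids"]))
# Inputs are now wrapped in [CLS] ... [SEP] rather than RoBERTa's <s> ... </s>,
# e.g. ['[CLS]', '8', 'heure', '##s', 'de', 'développement', '[SEP]']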
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:790a0e33b4ec23b4309ec55a6d239043b20ef5bad0672062137f307f3e18f979
-size 3515
+oid sha256:63c7e7ebe3649c2f3bcc3c4d60e05adc8e903dbb835f042784536c8419072ae9
+size 3899
vocab.txt ADDED
The diff for this file is too large to render. See raw diff
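
vocab.txt is the plain-text WordPiece vocabulary that BertTokenizer reads, one token per line with the line number as the token id; it is new here because the previous RoBERTa tokenizer shipped a BPE vocabulary (vocab.json plus merges.txt) instead. A sketch for a quick sanity check (local path assumed):

with open("vocab.txt", encoding="utf-8") as f:
    vocab = [line.rstrip("\n") for line in f]

# bert-base-multilingual-cased ships a 119,547-token vocabulary,
# so the copied vocab.txt should match that size.
print(len(vocab))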