Training in progress, epoch 1
Files changed:
- config.json +1 -1
- pytorch_model.bin +1 -1
- tokenizer_config.json +7 -0
- training_args.bin +1 -1
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "bert-
+  "_name_or_path": "Giorgib/bert-finetuned-squad",
   "architectures": [
     "BertForQuestionAnswering"
   ],
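The updated "_name_or_path" points at the Hub repo id, and the architecture is BertForQuestionAnswering. As a minimal sketch (not part of this commit), a checkpoint with this config would typically be loaded like so, assuming it has been pushed to the Hub under that id:

from transformers import AutoModelForQuestionAnswering, AutoTokenizer

# Repo id taken from the "_name_or_path" field in the diff above.
repo_id = "Giorgib/bert-finetuned-squad"
model = AutoModelForQuestionAnswering.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)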
pytorch_model.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:498d6224022e279baa412124b62d670de04c0273bc0447bfddef133280f87a66
 size 430952617
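This file is a Git LFS pointer, so the diff only records the new object's sha256 and size rather than the weights themselves. A small sketch of how one could check a downloaded pytorch_model.bin against the pointer; verify_lfs_object is a hypothetical helper, not something this repo provides:

import hashlib
import os

def verify_lfs_object(path, expected_oid, expected_size):
    # Hash the file in 1 MiB chunks and compare with the pointer's oid/size.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == expected_oid and os.path.getsize(path) == expected_size

print(verify_lfs_object(
    "pytorch_model.bin",
    "498d6224022e279baa412124b62d670de04c0273bc0447bfddef133280f87a66",
    430952617,
))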
tokenizer_config.json
CHANGED
@@ -3,11 +3,18 @@
   "cls_token": "[CLS]",
   "do_lower_case": false,
   "mask_token": "[MASK]",
+  "max_length": 384,
   "model_max_length": 512,
+  "pad_to_multiple_of": null,
   "pad_token": "[PAD]",
+  "pad_token_type_id": 0,
+  "padding_side": "right",
   "sep_token": "[SEP]",
+  "stride": 128,
   "strip_accents": null,
   "tokenize_chinese_chars": true,
   "tokenizer_class": "BertTokenizer",
+  "truncation_side": "right",
+  "truncation_strategy": "only_second",
   "unk_token": "[UNK]"
 }
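The added keys (max_length 384, stride 128, truncation only on the second sequence) mirror the usual SQuAD-style preprocessing call. A minimal sketch of that call, assuming the checkpoint is on the Hub under the repo id above; the question/context strings and the padding="max_length" choice are illustrative assumptions, not values recorded in this commit:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Giorgib/bert-finetuned-squad")

encoded = tokenizer(
    "Where is the Eiffel Tower?",      # question (placeholder)
    "The Eiffel Tower is in Paris.",   # context (placeholder)
    max_length=384,
    stride=128,
    truncation="only_second",          # truncate the context, never the question
    padding="max_length",              # assumed; padding_side "right" comes from the config
    return_overflowing_tokens=True,
    return_offsets_mapping=True,
)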
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:d4ee31283cef3f79f7db8e26dbd3d066d9724ae22c501b35d4d52f15ba8da301
 size 4027
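training_args.bin is another LFS pointer; the underlying file is the pickled TrainingArguments object that transformers.Trainer saves alongside checkpoints. A minimal sketch of inspecting it, assuming it was written by Trainer:

import torch

# weights_only=False is needed on recent PyTorch versions because this is a
# pickled TrainingArguments object, not a plain tensor state dict.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)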