Yaxin committed
Commit 75f3357 · 1 Parent(s): 00dca83

add large files

README.md ADDED
@@ -0,0 +1,66 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: bert-base-multilingual-cased-42-QAData
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # bert-base-multilingual-cased-42-QAData
+
+ This model is a fine-tuned version of [bert-base-multilingual-cased](https://huggingface.co/bert-base-multilingual-cased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.0873
+ - Precision: 0.4420
+ - Recall: 0.2887
+ - F1: 0.3493
+ - Accuracy: 0.9755
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 4e-05
+ - train_batch_size: 16
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 1.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1     | Accuracy |
+ |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:|
+ | 0.1064        | 1.0   | 3118 | 0.0873          | 0.4420    | 0.2887 | 0.3493 | 0.9755   |
+
+
+ ### Framework versions
+
+ - Transformers 4.24.0
+ - Pytorch 1.12.1+cu113
+ - Datasets 2.6.1
+ - Tokenizers 0.13.1
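
The card describes a BIO token tagger evaluated with span-level precision/recall/F1. A minimal inference sketch, assuming the repository id is `Yaxin/bert-base-multilingual-cased-42-QAData` (inferred from the committer name and model name; the diff itself does not state the namespace):

```python
from transformers import pipeline

# Assumed repo id; only the model name appears in this commit.
MODEL_ID = "Yaxin/bert-base-multilingual-cased-42-QAData"

# Token classification with the plain B/I/O labels declared in config.json.
tagger = pipeline("token-classification", model=MODEL_ID)

for tok in tagger("Where was Marie Curie born?"):
    print(tok["word"], tok["entity"], round(tok["score"], 3))
```
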
all_results.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "epoch": 1.0,
+   "eval_accuracy": 0.9755093109113304,
+   "eval_f1": 0.34928397084677487,
+   "eval_loss": 0.08728940039873123,
+   "eval_precision": 0.44204307346683086,
+   "eval_recall": 0.28870219099617667,
+   "eval_runtime": 1788.1641,
+   "eval_samples": 80069,
+   "eval_samples_per_second": 44.777,
+   "eval_steps_per_second": 2.799,
+   "train_loss": 0.10636317538786882,
+   "train_runtime": 5819.6703,
+   "train_samples": 49881,
+   "train_samples_per_second": 8.571,
+   "train_steps_per_second": 0.536
+ }
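
The derived throughput fields follow directly from the sample counts and runtimes; a quick arithmetic check:

```python
# Sanity-check the derived throughput figures in all_results.json.
eval_samples, eval_runtime = 80069, 1788.1641
print(eval_samples / eval_runtime)    # ≈ 44.777, matches eval_samples_per_second

train_samples, train_runtime = 49881, 5819.6703
print(train_samples / train_runtime)  # ≈ 8.571, matches train_samples_per_second
```
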
config.json ADDED
@@ -0,0 +1,42 @@
+ {
+   "_name_or_path": "bert-base-multilingual-cased",
+   "architectures": [
+     "BertForTokenClassification"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "classifier_dropout": null,
+   "directionality": "bidi",
+   "finetuning_task": "BIO",
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "B",
+     "1": "I",
+     "2": "O"
+   },
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "B": 0,
+     "I": 1,
+     "O": 2
+   },
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "bert",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "pooler_fc_size": 768,
+   "pooler_num_attention_heads": 12,
+   "pooler_num_fc_layers": 3,
+   "pooler_size_per_head": 128,
+   "pooler_size_per_head": 128,
+   "pooler_type": "first_token_transform",
+   "position_embedding_type": "absolute",
+   "torch_dtype": "float32",
+   "transformers_version": "4.24.0",
+   "type_vocab_size": 2,
+   "use_cache": true,
+   "vocab_size": 119547
+ }
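
`config.json` declares a `BertForTokenClassification` head over the three BIO labels. A sketch of decoding per-token predictions through `id2label`, under the same assumed repo id as above:

```python
import torch
from transformers import AutoModelForTokenClassification, AutoTokenizer

MODEL_ID = "Yaxin/bert-base-multilingual-cased-42-QAData"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForTokenClassification.from_pretrained(MODEL_ID)

inputs = tokenizer("Where was Marie Curie born?", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, seq_len, 3)

# id2label comes straight from config.json: {0: "B", 1: "I", 2: "O"}.
pred_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0].tolist())
for tok, i in zip(tokens, pred_ids):
    print(tok, model.config.id2label[i])
```
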
eval_results.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "epoch": 1.0,
+   "eval_accuracy": 0.9755093109113304,
+   "eval_f1": 0.34928397084677487,
+   "eval_loss": 0.08728940039873123,
+   "eval_precision": 0.44204307346683086,
+   "eval_recall": 0.28870219099617667,
+   "eval_runtime": 1788.1641,
+   "eval_samples": 80069,
+   "eval_samples_per_second": 44.777,
+   "eval_steps_per_second": 2.799
+ }
output.text ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a48d4e552623b648ef5ac6f4467448e273cdab8eeebbffe711bc49a93feb9119
+ size 709131057
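
The weights are stored through Git LFS, so the repository itself holds only this three-line pointer; the `oid` line gives the SHA-256 of the real ~709 MB blob. A sketch verifying a downloaded copy against the pointer (local file path assumed):

```python
import hashlib

# Expected digest, copied from the LFS pointer's "oid sha256:..." line.
EXPECTED = "a48d4e552623b648ef5ac6f4467448e273cdab8eeebbffe711bc49a93feb9119"

h = hashlib.sha256()
with open("pytorch_model.bin", "rb") as f:          # assumed local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
print(h.hexdigest() == EXPECTED)
```
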
runs/Nov02_13-33-30_172f3140324b/1667396111.2042036/events.out.tfevents.1667396111.172f3140324b.1517.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1f366c3b04985a2188defc596f570e15da4406fa3dbf7cdad4ebb5807cffe554
+ size 5540
runs/Nov02_13-33-30_172f3140324b/events.out.tfevents.1667396111.172f3140324b.1517.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31f0c028a8438340de39d9b023e5902cc02dcaad45757c3db4c22728bf448f21
+ size 5065
runs/Nov02_13-33-30_172f3140324b/events.out.tfevents.1667403722.172f3140324b.1517.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81f40f43d9a17625062e3db3043c6be18f43355d812569454e7dc296a59f6c1d
+ size 512
special_tokens_map.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "cls_token": "[CLS]",
+   "do_lower_case": false,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "name_or_path": "bert-base-multilingual-cased",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "special_tokens_map_file": null,
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "BertTokenizer",
+   "unk_token": "[UNK]"
+ }
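
The tokenizer config pins a cased `BertTokenizer` (`do_lower_case: false`) with a 512-token limit. A short check that the loaded tokenizer reflects these settings, under the same assumed repo id:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("Yaxin/bert-base-multilingual-cased-42-QAData")  # assumed repo id

print(tok.model_max_length)          # 512, from tokenizer_config.json
print(tok.cls_token, tok.sep_token)  # "[CLS]" "[SEP]", from special_tokens_map.json

# A cased tokenizer preserves capitalization, so these can tokenize differently.
print(tok.tokenize("Berlin"), tok.tokenize("berlin"))
```
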
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+   "epoch": 1.0,
+   "train_loss": 0.10636317538786882,
+   "train_runtime": 5819.6703,
+   "train_samples": 49881,
+   "train_samples_per_second": 8.571,
+   "train_steps_per_second": 0.536
+ }
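
These numbers also explain the step count in `trainer_state.json`: 49881 samples at batch size 16 give ceil(49881 / 16) = 3118 optimizer steps for the single epoch, assuming no gradient accumulation and a single device:

```python
import math

train_samples, batch_size = 49881, 16
steps_per_epoch = math.ceil(train_samples / batch_size)
print(steps_per_epoch)  # 3118, matching global_step and max_steps below
```
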
trainer_state.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "best_metric": 0.34928397084677487,
+   "best_model_checkpoint": "./bert-base-multilingual-cased-42-QAData/checkpoint-3118",
+   "epoch": 1.0,
+   "global_step": 3118,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.0,
+       "learning_rate": 0.0,
+       "loss": 0.1064,
+       "step": 3118
+     },
+     {
+       "epoch": 1.0,
+       "eval_accuracy": 0.9755093109113304,
+       "eval_f1": 0.34928397084677487,
+       "eval_loss": 0.08728940039873123,
+       "eval_precision": 0.44204307346683086,
+       "eval_recall": 0.28870219099617667,
+       "eval_runtime": 1787.1962,
+       "eval_samples_per_second": 44.801,
+       "eval_steps_per_second": 2.8,
+       "step": 3118
+     },
+     {
+       "epoch": 1.0,
+       "step": 3118,
+       "total_flos": 1.1125436195108934e+16,
+       "train_loss": 0.10636317538786882,
+       "train_runtime": 5819.6703,
+       "train_samples_per_second": 8.571,
+       "train_steps_per_second": 0.536
+     }
+   ],
+   "max_steps": 3118,
+   "num_train_epochs": 1,
+   "total_flos": 1.1125436195108934e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
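
A small sketch of pulling the best-checkpoint information back out of `trainer_state.json` (assumes the file sits in the working directory):

```python
import json

with open("trainer_state.json") as f:
    state = json.load(f)

print(state["best_metric"])            # 0.3492... (eval_f1)
print(state["best_model_checkpoint"])  # ./bert-base-multilingual-cased-42-QAData/checkpoint-3118

# Last logged evaluation entry from log_history:
evals = [e for e in state["log_history"] if "eval_f1" in e]
print(evals[-1]["eval_f1"], evals[-1]["eval_loss"])
```
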
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:43a480e22687cf827fc780214f454e135204d7b5e069eeb009ebd11c3236acaa
+ size 3439
vocab.txt ADDED
The diff for this file is too large to render. See raw diff