Delete rte
- rte/README.md +0 -73
- rte/all_results.json +0 -15
- rte/config.json +0 -32
- rte/eval_results.json +0 -9
- rte/model.safetensors +0 -3
- rte/runs/May15_00-21-03_cs-Precision-7960-Tower/events.out.tfevents.1747282869.cs-Precision-7960-Tower.139999.0 +0 -3
- rte/runs/May15_00-21-03_cs-Precision-7960-Tower/events.out.tfevents.1747282940.cs-Precision-7960-Tower.139999.1 +0 -3
- rte/special_tokens_map.json +0 -7
- rte/tokenizer.json +0 -0
- rte/tokenizer_config.json +0 -56
- rte/train_results.json +0 -9
- rte/trainer_state.json +0 -42
- rte/training_args.bin +0 -3
- rte/vocab.txt +0 -0
rte/README.md
DELETED
@@ -1,73 +0,0 @@
---
library_name: transformers
language:
- en
license: apache-2.0
base_model: google-bert/bert-base-cased
tags:
- generated_from_trainer
datasets:
- glue
metrics:
- accuracy
model-index:
- name: rte
  results:
  - task:
      name: Text Classification
      type: text-classification
    dataset:
      name: GLUE RTE
      type: glue
      args: rte
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.7256317689530686
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# rte

This model is a fine-tuned version of [google-bert/bert-base-cased](https://huggingface.co/google-bert/bert-base-cased) on the GLUE RTE dataset.
It achieves the following results on the evaluation set:
- Loss: 1.1735
- Accuracy: 0.7256

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 8
- seed: 42
- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
- lr_scheduler_type: linear
- num_epochs: 5.0

### Training results

### Framework versions

- Transformers 4.49.0
- Pytorch 2.6.0+cu118
- Datasets 3.3.1
- Tokenizers 0.21.0
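The deleted card records enough hyperparameters to approximate the original fine-tuning run. The sketch below is a minimal reconstruction using the standard Trainer API with the listed settings (learning rate 5e-05, train batch size 32, eval batch size 8, seed 42, adamw_torch, linear schedule, 5 epochs); the checkpoint was presumably produced with a run_glue.py-style script, so the max sequence length, padding strategy, and metric computation here are assumptions rather than the exact configuration used.

```python
# Approximate reconstruction of the deleted fine-tuning run (not the exact script used).
from datasets import load_dataset
from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
                          Trainer, TrainingArguments)

raw = load_dataset("glue", "rte")
tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")

def tokenize(batch):
    # RTE is a premise/hypothesis pair task; the columns are sentence1/sentence2.
    # max_length=128 is an assumption (the run_glue.py default), not recorded in the card.
    return tokenizer(batch["sentence1"], batch["sentence2"],
                     truncation=True, max_length=128)

encoded = raw.map(tokenize, batched=True)
model = AutoModelForSequenceClassification.from_pretrained(
    "google-bert/bert-base-cased", num_labels=2)

args = TrainingArguments(
    output_dir="rte",                 # matches the deleted folder name
    learning_rate=5e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=8,
    num_train_epochs=5.0,
    lr_scheduler_type="linear",
    optim="adamw_torch",
    seed=42,
)

# Trainer pads batches with DataCollatorWithPadding when a tokenizer is passed.
# A compute_metrics function for accuracy is omitted here, so evaluate() only reports loss.
trainer = Trainer(model=model, args=args,
                  train_dataset=encoded["train"],
                  eval_dataset=encoded["validation"],
                  tokenizer=tokenizer)
trainer.train()
trainer.evaluate()
```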
rte/all_results.json
DELETED
@@ -1,15 +0,0 @@
{
    "epoch": 5.0,
    "eval_accuracy": 0.7256317689530686,
    "eval_loss": 1.1735286712646484,
    "eval_runtime": 0.553,
    "eval_samples": 277,
    "eval_samples_per_second": 500.885,
    "eval_steps_per_second": 63.289,
    "total_flos": 835874173209600.0,
    "train_loss": 0.4375361124674479,
    "train_runtime": 69.5104,
    "train_samples": 2490,
    "train_samples_per_second": 179.11,
    "train_steps_per_second": 5.611
}
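The throughput figures above are internally consistent, assuming the Trainer reports them as samples (or optimizer steps) divided by wall-clock runtime:

```python
import math

# Evaluation: 277 samples in 0.553 s, eval batch size 8.
print(277 / 0.553)                  # ~500.9 -> eval_samples_per_second 500.885
print(math.ceil(277 / 8) / 0.553)   # ~63.3  -> eval_steps_per_second 63.289

# Training: 2490 samples x 5 epochs in 69.5104 s, 390 optimizer steps in total.
print(2490 * 5 / 69.5104)           # ~179.1 -> train_samples_per_second 179.11
print(390 / 69.5104)                # ~5.61  -> train_steps_per_second 5.611
```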
rte/config.json
DELETED
@@ -1,32 +0,0 @@
{
  "_name_or_path": "google-bert/bert-base-cased",
  "architectures": [
    "BertForSequenceClassification"
  ],
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "finetuning_task": "rte",
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "entailment": 0,
    "not_entailment": 1
  },
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "problem_type": "single_label_classification",
  "torch_dtype": "float32",
  "transformers_version": "4.49.0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 28996
}
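The label2id block is what makes the checkpoint's two output logits interpretable: index 0 is entailment, index 1 is not_entailment. A minimal inference sketch, assuming you still have a local copy of the now-deleted rte/ folder:

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_dir = "rte"  # hypothetical local path to the deleted checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForSequenceClassification.from_pretrained(model_dir)

premise = "The cat sat on the mat."
hypothesis = "There is a cat on the mat."
inputs = tokenizer(premise, hypothesis, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits

# Invert label2id from the config to map the argmax index back to a label name.
id2label = {v: k for k, v in model.config.label2id.items()}
print(id2label[logits.argmax(dim=-1).item()])  # "entailment" or "not_entailment"
```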
rte/eval_results.json
DELETED
@@ -1,9 +0,0 @@
{
    "epoch": 5.0,
    "eval_accuracy": 0.7256317689530686,
    "eval_loss": 1.1735286712646484,
    "eval_runtime": 0.553,
    "eval_samples": 277,
    "eval_samples_per_second": 500.885,
    "eval_steps_per_second": 63.289
}
rte/model.safetensors
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bec20ee8f61ce31201da24f6a4f0c76b0af9d54aa38e11f302abecf1da428c81
size 440358464
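This file is a Git LFS pointer rather than the weights themselves: the repository stores only the object hash and byte size. The recorded 440,358,464 bytes corresponds to roughly 110 million float32 values, in line with a BERT-base classification checkpoint. A small sketch for reading such a pointer (the path is hypothetical and assumes the pointer text, not the resolved binary):

```python
# Parse a Git LFS pointer file into its key/value fields.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path) as fh:
        for line in fh:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = read_lfs_pointer("rte/model.safetensors")  # assumes this is still the pointer, not the weights
print(ptr["oid"], int(ptr["size"]))
print(int(ptr["size"]) / 4 / 1e6, "million float32 values")  # ~110M, plausible for BERT-base
```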
rte/runs/May15_00-21-03_cs-Precision-7960-Tower/events.out.tfevents.1747282869.cs-Precision-7960-Tower.139999.0
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c11680b7711ca83e55db84d0d8e879fe925f1169b96cb7ac31588c61651d406c
size 5487
rte/runs/May15_00-21-03_cs-Precision-7960-Tower/events.out.tfevents.1747282940.cs-Precision-7960-Tower.139999.1
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e799700f4c6c4de44e2fdcac5638d1ed166efeecb040235bd3c8cafbfb5e686e
size 411
rte/special_tokens_map.json
DELETED
@@ -1,7 +0,0 @@
{
  "cls_token": "[CLS]",
  "mask_token": "[MASK]",
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "unk_token": "[UNK]"
}
rte/tokenizer.json
DELETED
The diff for this file is too large to render.
rte/tokenizer_config.json
DELETED
@@ -1,56 +0,0 @@
{
  "added_tokens_decoder": {
    "0": {
      "content": "[PAD]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "100": {
      "content": "[UNK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "101": {
      "content": "[CLS]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "102": {
      "content": "[SEP]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "103": {
      "content": "[MASK]",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "clean_up_tokenization_spaces": false,
  "cls_token": "[CLS]",
  "do_lower_case": false,
  "extra_special_tokens": {},
  "mask_token": "[MASK]",
  "model_max_length": 512,
  "pad_token": "[PAD]",
  "sep_token": "[SEP]",
  "strip_accents": null,
  "tokenize_chinese_chars": true,
  "tokenizer_class": "BertTokenizer",
  "unk_token": "[UNK]"
}
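The added_tokens_decoder block pins the five special tokens to the usual bert-base-cased vocabulary positions (PAD=0, UNK=100, CLS=101, SEP=102, MASK=103), which can be confirmed against the base tokenizer:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
for token in ["[PAD]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"]:
    print(token, tok.convert_tokens_to_ids(token))
# Expected: [PAD] 0, [UNK] 100, [CLS] 101, [SEP] 102, [MASK] 103
```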
rte/train_results.json
DELETED
@@ -1,9 +0,0 @@
{
    "epoch": 5.0,
    "total_flos": 835874173209600.0,
    "train_loss": 0.4375361124674479,
    "train_runtime": 69.5104,
    "train_samples": 2490,
    "train_samples_per_second": 179.11,
    "train_steps_per_second": 5.611
}
rte/trainer_state.json
DELETED
@@ -1,42 +0,0 @@
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 390,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 5.0,
      "step": 390,
      "total_flos": 835874173209600.0,
      "train_loss": 0.4375361124674479,
      "train_runtime": 69.5104,
      "train_samples_per_second": 179.11,
      "train_steps_per_second": 5.611
    }
  ],
  "logging_steps": 500,
  "max_steps": 390,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 5,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 835874173209600.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}
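The 390 in global_step and max_steps follows from numbers recorded elsewhere in this commit: 2490 training examples at batch size 32 give ceil(2490 / 32) = 78 optimizer steps per epoch (assuming the final short batch is kept), and 78 × 5 epochs = 390:

```python
import math

train_samples, batch_size, epochs = 2490, 32, 5
steps_per_epoch = math.ceil(train_samples / batch_size)  # 78; the last batch is smaller
print(steps_per_epoch * epochs)  # 390, matching global_step / max_steps above
```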
rte/training_args.bin
DELETED
@@ -1,3 +0,0 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d65356dd510949c754b5f2d0e5a584ec0593cc812f634d69f740bbd56d5f10cf
size 5304
rte/vocab.txt
DELETED
The diff for this file is too large to render.