superbigtree committed
Commit 5e0bcf2 · verified · 1 Parent(s): 8f144ab

End of training

README.md CHANGED
@@ -2,7 +2,7 @@
 tags:
 - generated_from_trainer
 datasets:
-- coco_dataset_script
+- ydshieh/coco_dataset_script
 model-index:
 - name: clip-roberta-finetuned
   results: []
@@ -13,7 +13,9 @@ should probably proofread and complete it, then remove this comment. -->
 
 # clip-roberta-finetuned
 
-This model was trained from scratch on the coco_dataset_script dataset.
+This model was trained from scratch on the ydshieh/coco_dataset_script 2017 dataset.
+It achieves the following results on the evaluation set:
+- Loss: 1.5877
 
 ## Model description
 
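The updated model card matches the layout produced by the `transformers` contrastive-image-text example (a CLIP vision tower paired with a RoBERTa text tower). A minimal usage sketch follows; the repo id and the `VisionTextDualEncoderModel` layout are assumptions inferred from the model name, not something this commit states.

```python
# Minimal sketch, assuming the checkpoint uses the VisionTextDualEncoder layout
# and is published as superbigtree/clip-roberta-finetuned (both assumptions).
import torch
from PIL import Image
from transformers import (
    AutoImageProcessor,
    AutoTokenizer,
    VisionTextDualEncoderModel,
    VisionTextDualEncoderProcessor,
)

repo_id = "superbigtree/clip-roberta-finetuned"  # assumed repo id

model = VisionTextDualEncoderModel.from_pretrained(repo_id)
processor = VisionTextDualEncoderProcessor(
    AutoImageProcessor.from_pretrained(repo_id),
    AutoTokenizer.from_pretrained(repo_id),
)

# Score a local image against a few candidate captions.
image = Image.open("example.jpg")
inputs = processor(
    text=["a photo of a dog", "a photo of a cat"],
    images=[image],
    return_tensors="pt",
    padding=True,
)
with torch.no_grad():
    outputs = model(**inputs)

# logits_per_image holds image-to-text similarity; softmax gives a relative
# ranking over the candidate captions.
print(outputs.logits_per_image.softmax(dim=-1))
```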
all_results.json CHANGED
@@ -1,11 +1,11 @@
 {
-    "epoch": 9.95,
-    "eval_loss": 1.6496304273605347,
-    "eval_runtime": 23.1083,
-    "eval_samples_per_second": 1082.47,
-    "eval_steps_per_second": 2.12,
-    "train_loss": 0.0,
-    "train_runtime": 1.83,
-    "train_samples_per_second": 1616827.341,
-    "train_steps_per_second": 3158.501
+    "epoch": 5.0,
+    "eval_loss": 1.587681531906128,
+    "eval_runtime": 22.6452,
+    "eval_samples_per_second": 1104.605,
+    "eval_steps_per_second": 2.164,
+    "train_loss": 0.24804650540995352,
+    "train_runtime": 4511.4291,
+    "train_samples_per_second": 655.838,
+    "train_steps_per_second": 1.281
 }
eval_results.json CHANGED
@@ -1,7 +1,7 @@
 {
-    "epoch": 9.95,
-    "eval_loss": 1.6496304273605347,
-    "eval_runtime": 23.1083,
-    "eval_samples_per_second": 1082.47,
-    "eval_steps_per_second": 2.12
+    "epoch": 5.0,
+    "eval_loss": 1.587681531906128,
+    "eval_runtime": 22.6452,
+    "eval_samples_per_second": 1104.605,
+    "eval_steps_per_second": 2.164
 }
runs/Feb14_23-59-58_584f6c291e44/events.out.tfevents.1707959804.584f6c291e44.18323.1 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d2838cba49e2bea107dae0a4d3e7d1559982ea89db4bc11224b5e71975ddf45
+size 359
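The three added lines are a Git LFS pointer rather than the TensorBoard event file itself; the pointer records the tracked file's SHA-256 and size (359 bytes here), and the binary is resolved at download time. A sketch for fetching the real file with `huggingface_hub` (repo id assumed):

```python
# Sketch only: download the TensorBoard event file added above as an LFS pointer.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="superbigtree/clip-roberta-finetuned",  # assumed repo id
    filename="runs/Feb14_23-59-58_584f6c291e44/events.out.tfevents.1707959804.584f6c291e44.18323.1",
)
print(path)  # local path to the resolved (non-pointer) file
```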
train_results.json CHANGED
@@ -1,7 +1,7 @@
 {
-    "epoch": 9.95,
-    "train_loss": 0.0,
-    "train_runtime": 1.83,
-    "train_samples_per_second": 1616827.341,
-    "train_steps_per_second": 3158.501
+    "epoch": 5.0,
+    "train_loss": 0.24804650540995352,
+    "train_runtime": 4511.4291,
+    "train_samples_per_second": 655.838,
+    "train_steps_per_second": 1.281
 }
trainer_state.json CHANGED
@@ -1,159 +1,87 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 9.948096885813149,
+  "epoch": 5.0,
   "eval_steps": 500,
-  "global_step": 11500,
+  "global_step": 5780,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 0.43,
-      "learning_rate": 4.7837370242214535e-05,
-      "loss": 0.7277,
+      "learning_rate": 4.567474048442907e-05,
+      "loss": 0.7182,
       "step": 500
     },
     {
       "epoch": 0.87,
-      "learning_rate": 4.567474048442907e-05,
-      "loss": 0.3994,
+      "learning_rate": 4.134948096885813e-05,
+      "loss": 0.393,
       "step": 1000
     },
     {
       "epoch": 1.3,
-      "learning_rate": 4.35121107266436e-05,
-      "loss": 0.3097,
+      "learning_rate": 3.70242214532872e-05,
+      "loss": 0.3001,
       "step": 1500
     },
     {
       "epoch": 1.73,
-      "learning_rate": 4.134948096885813e-05,
-      "loss": 0.2747,
+      "learning_rate": 3.269896193771627e-05,
+      "loss": 0.2633,
       "step": 2000
     },
     {
       "epoch": 2.16,
-      "learning_rate": 3.9186851211072664e-05,
-      "loss": 0.2418,
+      "learning_rate": 2.8373702422145332e-05,
+      "loss": 0.2275,
       "step": 2500
     },
     {
       "epoch": 2.6,
-      "learning_rate": 3.70242214532872e-05,
-      "loss": 0.2109,
+      "learning_rate": 2.4048442906574396e-05,
+      "loss": 0.1934,
       "step": 3000
     },
     {
       "epoch": 3.03,
-      "learning_rate": 3.4861591695501735e-05,
-      "loss": 0.2044,
+      "learning_rate": 1.972318339100346e-05,
+      "loss": 0.1841,
       "step": 3500
     },
     {
       "epoch": 3.46,
-      "learning_rate": 3.269896193771627e-05,
-      "loss": 0.1703,
+      "learning_rate": 1.5397923875432525e-05,
+      "loss": 0.1484,
       "step": 4000
     },
     {
       "epoch": 3.89,
-      "learning_rate": 3.05363321799308e-05,
-      "loss": 0.1709,
+      "learning_rate": 1.1072664359861593e-05,
+      "loss": 0.1445,
       "step": 4500
     },
     {
       "epoch": 4.33,
-      "learning_rate": 2.8373702422145332e-05,
-      "loss": 0.1466,
+      "learning_rate": 6.747404844290659e-06,
+      "loss": 0.1217,
       "step": 5000
     },
     {
       "epoch": 4.76,
-      "learning_rate": 2.6211072664359864e-05,
-      "loss": 0.1433,
-      "step": 5500
-    },
-    {
-      "epoch": 5.19,
-      "learning_rate": 2.4048442906574396e-05,
-      "loss": 0.1302,
-      "step": 6000
-    },
-    {
-      "epoch": 5.62,
-      "learning_rate": 2.188581314878893e-05,
-      "loss": 0.1202,
-      "step": 6500
-    },
-    {
-      "epoch": 6.06,
-      "learning_rate": 1.972318339100346e-05,
-      "loss": 0.1169,
-      "step": 7000
-    },
-    {
-      "epoch": 6.49,
-      "learning_rate": 1.7560553633217993e-05,
-      "loss": 0.1016,
-      "step": 7500
-    },
-    {
-      "epoch": 6.92,
-      "learning_rate": 1.5397923875432525e-05,
-      "loss": 0.1027,
-      "step": 8000
-    },
-    {
-      "epoch": 7.35,
-      "learning_rate": 1.323529411764706e-05,
-      "loss": 0.0892,
-      "step": 8500
-    },
-    {
-      "epoch": 7.79,
-      "learning_rate": 1.1072664359861593e-05,
-      "loss": 0.087,
-      "step": 9000
-    },
-    {
-      "epoch": 8.22,
-      "learning_rate": 8.910034602076126e-06,
-      "loss": 0.0804,
-      "step": 9500
-    },
-    {
-      "epoch": 8.65,
-      "learning_rate": 6.747404844290659e-06,
-      "loss": 0.0749,
-      "step": 10000
-    },
-    {
-      "epoch": 9.08,
-      "learning_rate": 4.584775086505191e-06,
-      "loss": 0.0718,
-      "step": 10500
-    },
-    {
-      "epoch": 9.52,
       "learning_rate": 2.4221453287197232e-06,
-      "loss": 0.0654,
-      "step": 11000
-    },
-    {
-      "epoch": 9.95,
-      "learning_rate": 2.5951557093425607e-07,
-      "loss": 0.0644,
-      "step": 11500
+      "loss": 0.113,
+      "step": 5500
     },
     {
-      "epoch": 9.95,
-      "step": 11500,
-      "total_flos": 7.86149740118016e+17,
-      "train_loss": 0.0,
-      "train_runtime": 1.83,
-      "train_samples_per_second": 1616827.341,
-      "train_steps_per_second": 3158.501
+      "epoch": 5.0,
+      "step": 5780,
+      "total_flos": 3.951256954680115e+17,
+      "train_loss": 0.24804650540995352,
+      "train_runtime": 4511.4291,
+      "train_samples_per_second": 655.838,
+      "train_steps_per_second": 1.281
     }
   ],
   "logging_steps": 500,
@@ -161,7 +89,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 5,
   "save_steps": 500,
-  "total_flos": 7.86149740118016e+17,
+  "total_flos": 3.951256954680115e+17,
   "train_batch_size": 64,
   "trial_name": null,
   "trial_params": null