SystemAdmin123 committed
Commit 5350589 · verified · 1 Parent(s): 9e9b536

Training in progress, step 1600, checkpoint

last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c5c077b171ae4b737b7f301a57a9b94a0f46db1956a8b28ce23c479697cca649
+ oid sha256:5bcd41d10593de3cdb9d40fb311ae2ff3910a2b47dbbe8786beb51948f14e6ff
  size 2066752
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:1b791eecb5542c5f0363f26e98b2e45debdfbfcde29b0a6e042cdf3e5e6567d8
+ oid sha256:d350466da0c8d34450032ea9f33f3752c1be3a599d9885109e18618f6872f48a
  size 2162798
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a85b7ee4e3e06f8b21d4d23e7eb8bbe5510e7f25d23cfc2ffc16d97845a1be25
+ oid sha256:ced0ac0d077b41bd2987add3782b7ce1140142ac3cddaf433babda96674c50fb
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:427276ae77d918ee2b880ea4152618640d39ea76588856ca2cd62fe2ab8b83d7
+ oid sha256:9ed6aad8025a80b776f2d50234fd05b8c1e2e758d3d427458fe15ed9bc7f733a
  size 1064
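
The four files above are Git LFS pointer files, so this commit records only a new sha256 oid and a byte size for each checkpoint artifact. A minimal sketch of verifying a locally pulled file against the oid shown in this commit, assuming the checkpoint was downloaded to ./last-checkpoint (the path and the choice of model.safetensors are illustrative assumptions, not part of the commit):

# Sketch: hash a pulled checkpoint file and compare it to the LFS pointer's oid.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        # Read in 1 MiB chunks so large checkpoint files are not loaded at once.
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid taken from the new model.safetensors pointer in this commit.
expected = "5bcd41d10593de3cdb9d40fb311ae2ff3910a2b47dbbe8786beb51948f14e6ff"
actual = sha256_of("last-checkpoint/model.safetensors")  # assumed local path
print("match" if actual == expected else f"mismatch: {actual}")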
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.3551346552234389,
+ "epoch": 0.47351287363125183,
  "eval_steps": 200,
- "global_step": 1200,
+ "global_step": 1600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -903,6 +903,302 @@
  "eval_samples_per_second": 74.702,
  "eval_steps_per_second": 18.7,
  "step": 1200
+ },
+ {
+ "epoch": 0.3580941106836342,
+ "grad_norm": 0.466796875,
+ "learning_rate": 0.0001135169494631497,
+ "loss": 8.6666,
+ "step": 1210
+ },
+ {
+ "epoch": 0.36105356614382955,
+ "grad_norm": 0.484375,
+ "learning_rate": 0.00011220516908034601,
+ "loss": 8.6954,
+ "step": 1220
+ },
+ {
+ "epoch": 0.3640130216040249,
+ "grad_norm": 0.5,
+ "learning_rate": 0.00011089125314635726,
+ "loss": 8.7236,
+ "step": 1230
+ },
+ {
+ "epoch": 0.3669724770642202,
+ "grad_norm": 0.62109375,
+ "learning_rate": 0.00010957543155842702,
+ "loss": 8.7772,
+ "step": 1240
+ },
+ {
+ "epoch": 0.36993193252441553,
+ "grad_norm": 0.859375,
+ "learning_rate": 0.00010825793454723325,
+ "loss": 8.7222,
+ "step": 1250
+ },
+ {
+ "epoch": 0.37289138798461086,
+ "grad_norm": 0.51171875,
+ "learning_rate": 0.00010693899263660441,
+ "loss": 8.6544,
+ "step": 1260
+ },
+ {
+ "epoch": 0.3758508434448062,
+ "grad_norm": 0.498046875,
+ "learning_rate": 0.00010561883660318455,
+ "loss": 8.6639,
+ "step": 1270
+ },
+ {
+ "epoch": 0.37881029890500145,
+ "grad_norm": 0.53515625,
+ "learning_rate": 0.00010429769743605407,
+ "loss": 8.6819,
+ "step": 1280
+ },
+ {
+ "epoch": 0.3817697543651968,
+ "grad_norm": 0.68359375,
+ "learning_rate": 0.00010297580629631325,
+ "loss": 8.6511,
+ "step": 1290
+ },
+ {
+ "epoch": 0.3847292098253921,
+ "grad_norm": 1.03125,
+ "learning_rate": 0.00010165339447663587,
+ "loss": 8.6257,
+ "step": 1300
+ },
+ {
+ "epoch": 0.38768866528558743,
+ "grad_norm": 0.43359375,
+ "learning_rate": 0.00010033069336079952,
+ "loss": 8.7457,
+ "step": 1310
+ },
+ {
+ "epoch": 0.39064812074578276,
+ "grad_norm": 0.482421875,
+ "learning_rate": 9.900793438320037e-05,
+ "loss": 8.6771,
+ "step": 1320
+ },
+ {
+ "epoch": 0.3936075762059781,
+ "grad_norm": 0.50390625,
+ "learning_rate": 9.768534898835862e-05,
+ "loss": 8.6776,
+ "step": 1330
+ },
+ {
+ "epoch": 0.3965670316661734,
+ "grad_norm": 0.6171875,
+ "learning_rate": 9.636316859042259e-05,
+ "loss": 8.6742,
+ "step": 1340
+ },
+ {
+ "epoch": 0.39952648712636873,
+ "grad_norm": 0.859375,
+ "learning_rate": 9.504162453267777e-05,
+ "loss": 8.6419,
+ "step": 1350
+ },
+ {
+ "epoch": 0.40248594258656406,
+ "grad_norm": 0.451171875,
+ "learning_rate": 9.372094804706867e-05,
+ "loss": 8.7098,
+ "step": 1360
+ },
+ {
+ "epoch": 0.4054453980467594,
+ "grad_norm": 0.546875,
+ "learning_rate": 9.24013702137397e-05,
+ "loss": 8.6633,
+ "step": 1370
+ },
+ {
+ "epoch": 0.4084048535069547,
+ "grad_norm": 0.5859375,
+ "learning_rate": 9.108312192060298e-05,
+ "loss": 8.687,
+ "step": 1380
+ },
+ {
+ "epoch": 0.41136430896715004,
+ "grad_norm": 0.890625,
+ "learning_rate": 8.97664338229395e-05,
+ "loss": 8.713,
+ "step": 1390
+ },
+ {
+ "epoch": 0.41432376442734536,
+ "grad_norm": 1.6015625,
+ "learning_rate": 8.845153630304139e-05,
+ "loss": 8.7511,
+ "step": 1400
+ },
+ {
+ "epoch": 0.41432376442734536,
+ "eval_loss": 8.69721794128418,
+ "eval_runtime": 8.7762,
+ "eval_samples_per_second": 171.145,
+ "eval_steps_per_second": 42.843,
+ "step": 1400
+ },
+ {
+ "epoch": 0.4172832198875407,
+ "grad_norm": 0.46484375,
+ "learning_rate": 8.713865942990141e-05,
+ "loss": 8.6655,
+ "step": 1410
+ },
+ {
+ "epoch": 0.420242675347736,
+ "grad_norm": 0.55078125,
+ "learning_rate": 8.582803291895758e-05,
+ "loss": 8.6978,
+ "step": 1420
+ },
+ {
+ "epoch": 0.42320213080793134,
+ "grad_norm": 0.515625,
+ "learning_rate": 8.451988609189987e-05,
+ "loss": 8.7573,
+ "step": 1430
+ },
+ {
+ "epoch": 0.42616158626812667,
+ "grad_norm": 0.70703125,
+ "learning_rate": 8.321444783654524e-05,
+ "loss": 8.6963,
+ "step": 1440
+ },
+ {
+ "epoch": 0.429121041728322,
+ "grad_norm": 1.1953125,
+ "learning_rate": 8.191194656678904e-05,
+ "loss": 8.7627,
+ "step": 1450
+ },
+ {
+ "epoch": 0.4320804971885173,
+ "grad_norm": 0.53125,
+ "learning_rate": 8.061261018263919e-05,
+ "loss": 8.6564,
+ "step": 1460
+ },
+ {
+ "epoch": 0.43503995264871265,
+ "grad_norm": 0.5546875,
+ "learning_rate": 7.931666603034033e-05,
+ "loss": 8.6641,
+ "step": 1470
+ },
+ {
+ "epoch": 0.437999408108908,
+ "grad_norm": 0.83203125,
+ "learning_rate": 7.80243408625947e-05,
+ "loss": 8.6646,
+ "step": 1480
+ },
+ {
+ "epoch": 0.4409588635691033,
+ "grad_norm": 0.71875,
+ "learning_rate": 7.673586079888698e-05,
+ "loss": 8.7323,
+ "step": 1490
+ },
+ {
+ "epoch": 0.4439183190292986,
+ "grad_norm": 1.0078125,
+ "learning_rate": 7.54514512859201e-05,
+ "loss": 8.6167,
+ "step": 1500
+ },
+ {
+ "epoch": 0.44687777448949395,
+ "grad_norm": 0.453125,
+ "learning_rate": 7.417133705816837e-05,
+ "loss": 8.6929,
+ "step": 1510
+ },
+ {
+ "epoch": 0.4498372299496893,
+ "grad_norm": 0.482421875,
+ "learning_rate": 7.289574209855559e-05,
+ "loss": 8.6871,
+ "step": 1520
+ },
+ {
+ "epoch": 0.4527966854098846,
+ "grad_norm": 0.484375,
+ "learning_rate": 7.16248895992645e-05,
+ "loss": 8.6881,
+ "step": 1530
+ },
+ {
+ "epoch": 0.45575614087007993,
+ "grad_norm": 0.7578125,
+ "learning_rate": 7.035900192268464e-05,
+ "loss": 8.6753,
+ "step": 1540
+ },
+ {
+ "epoch": 0.45871559633027525,
+ "grad_norm": 0.83203125,
+ "learning_rate": 6.909830056250527e-05,
+ "loss": 8.7056,
+ "step": 1550
+ },
+ {
+ "epoch": 0.4616750517904706,
+ "grad_norm": 0.431640625,
+ "learning_rate": 6.784300610496048e-05,
+ "loss": 8.706,
+ "step": 1560
+ },
+ {
+ "epoch": 0.46463450725066585,
+ "grad_norm": 0.482421875,
+ "learning_rate": 6.65933381902329e-05,
+ "loss": 8.6888,
+ "step": 1570
+ },
+ {
+ "epoch": 0.4675939627108612,
+ "grad_norm": 0.67578125,
+ "learning_rate": 6.534951547402322e-05,
+ "loss": 8.7158,
+ "step": 1580
+ },
+ {
+ "epoch": 0.4705534181710565,
+ "grad_norm": 0.63671875,
+ "learning_rate": 6.411175558929152e-05,
+ "loss": 8.728,
+ "step": 1590
+ },
+ {
+ "epoch": 0.47351287363125183,
+ "grad_norm": 0.890625,
+ "learning_rate": 6.28802751081779e-05,
+ "loss": 8.729,
+ "step": 1600
+ },
+ {
+ "epoch": 0.47351287363125183,
+ "eval_loss": 8.696118354797363,
+ "eval_runtime": 19.8643,
+ "eval_samples_per_second": 75.613,
+ "eval_steps_per_second": 18.928,
+ "step": 1600
  }
  ],
  "logging_steps": 10,
@@ -922,7 +1218,7 @@
  "attributes": {}
  }
  },
- "total_flos": 30763608440832.0,
+ "total_flos": 41018144587776.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null