{
  "best_metric": 33.16084938138103,
  "best_model_checkpoint": "./whisper-small-ko-custom-1000h\\checkpoint-1000",
  "epoch": 0.02007870853746687,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 2.5e-06,
      "loss": 3.5773,
      "step": 25
    },
    {
      "epoch": 0.0,
      "learning_rate": 5e-06,
      "loss": 1.4784,
      "step": 50
    },
    {
      "epoch": 0.0,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.753,
      "step": 75
    },
    {
      "epoch": 0.0,
      "learning_rate": 1e-05,
      "loss": 0.5502,
      "step": 100
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.948979591836737e-06,
      "loss": 0.3562,
      "step": 125
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.89795918367347e-06,
      "loss": 0.2842,
      "step": 150
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.846938775510205e-06,
      "loss": 0.2484,
      "step": 175
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.795918367346939e-06,
      "loss": 0.2421,
      "step": 200
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.744897959183674e-06,
      "loss": 0.2403,
      "step": 225
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.693877551020408e-06,
      "loss": 0.2222,
      "step": 250
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.642857142857144e-06,
      "loss": 0.202,
      "step": 275
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.591836734693878e-06,
      "loss": 0.2197,
      "step": 300
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.540816326530612e-06,
      "loss": 0.2045,
      "step": 325
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.489795918367348e-06,
      "loss": 0.2275,
      "step": 350
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.438775510204082e-06,
      "loss": 0.2023,
      "step": 375
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.387755102040818e-06,
      "loss": 0.2212,
      "step": 400
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.336734693877552e-06,
      "loss": 0.211,
      "step": 425
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.285714285714288e-06,
      "loss": 0.2041,
      "step": 450
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.234693877551022e-06,
      "loss": 0.2215,
      "step": 475
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.183673469387756e-06,
      "loss": 0.2012,
      "step": 500
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.13265306122449e-06,
      "loss": 0.1932,
      "step": 525
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.081632653061225e-06,
      "loss": 0.2087,
      "step": 550
    },
    {
      "epoch": 0.0,
      "learning_rate": 9.03061224489796e-06,
      "loss": 0.1908,
      "step": 575
    },
    {
      "epoch": 0.0,
      "learning_rate": 8.979591836734695e-06,
      "loss": 0.2019,
      "step": 600
    },
    {
      "epoch": 0.0,
      "learning_rate": 8.92857142857143e-06,
      "loss": 0.2097,
      "step": 625
    },
    {
      "epoch": 0.0,
      "learning_rate": 8.877551020408163e-06,
      "loss": 0.2105,
      "step": 650
    },
    {
      "epoch": 0.0,
      "learning_rate": 8.826530612244899e-06,
      "loss": 0.1846,
      "step": 675
    },
    {
      "epoch": 0.0,
      "learning_rate": 8.775510204081633e-06,
      "loss": 0.1748,
      "step": 700
    },
    {
      "epoch": 0.0,
      "learning_rate": 8.724489795918369e-06,
      "loss": 0.1761,
      "step": 725
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.673469387755103e-06,
      "loss": 0.1982,
      "step": 750
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.622448979591837e-06,
      "loss": 0.1926,
      "step": 775
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.571428571428571e-06,
      "loss": 0.1732,
      "step": 800
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.520408163265307e-06,
      "loss": 0.1841,
      "step": 825
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.469387755102042e-06,
      "loss": 0.1699,
      "step": 850
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.418367346938776e-06,
      "loss": 0.2131,
      "step": 875
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.36734693877551e-06,
      "loss": 0.1828,
      "step": 900
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.316326530612246e-06,
      "loss": 0.1796,
      "step": 925
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.26530612244898e-06,
      "loss": 0.1909,
      "step": 950
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.214285714285714e-06,
      "loss": 0.2066,
      "step": 975
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.16326530612245e-06,
      "loss": 0.1913,
      "step": 1000
    },
    {
      "epoch": 0.01,
      "eval_loss": 0.17891158163547516,
      "eval_runtime": 14046.7097,
      "eval_samples_per_second": 3.473,
      "eval_steps_per_second": 0.434,
      "eval_wer": 33.16084938138103,
      "step": 1000
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.112244897959184e-06,
      "loss": 0.1426,
      "step": 1025
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.06122448979592e-06,
      "loss": 0.1976,
      "step": 1050
    },
    {
      "epoch": 0.01,
      "learning_rate": 8.010204081632654e-06,
      "loss": 0.1677,
      "step": 1075
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.959183673469388e-06,
      "loss": 0.1633,
      "step": 1100
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.908163265306124e-06,
      "loss": 0.1994,
      "step": 1125
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.857142857142858e-06,
      "loss": 0.162,
      "step": 1150
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.806122448979593e-06,
      "loss": 0.1779,
      "step": 1175
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.755102040816327e-06,
      "loss": 0.1829,
      "step": 1200
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.704081632653061e-06,
      "loss": 0.1933,
      "step": 1225
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.653061224489796e-06,
      "loss": 0.1606,
      "step": 1250
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.602040816326531e-06,
      "loss": 0.1785,
      "step": 1275
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.551020408163265e-06,
      "loss": 0.1802,
      "step": 1300
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.17,
      "step": 1325
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.448979591836736e-06,
      "loss": 0.1679,
      "step": 1350
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.39795918367347e-06,
      "loss": 0.157,
      "step": 1375
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.346938775510205e-06,
      "loss": 0.161,
      "step": 1400
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.295918367346939e-06,
      "loss": 0.1494,
      "step": 1425
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.244897959183675e-06,
      "loss": 0.1799,
      "step": 1450
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.193877551020409e-06,
      "loss": 0.1668,
      "step": 1475
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.1428571428571436e-06,
      "loss": 0.1798,
      "step": 1500
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.091836734693878e-06,
      "loss": 0.1476,
      "step": 1525
    },
    {
      "epoch": 0.01,
      "learning_rate": 7.0408163265306125e-06,
      "loss": 0.1779,
      "step": 1550
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.989795918367348e-06,
      "loss": 0.1795,
      "step": 1575
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.938775510204082e-06,
      "loss": 0.1642,
      "step": 1600
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.887755102040817e-06,
      "loss": 0.1239,
      "step": 1625
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.836734693877551e-06,
      "loss": 0.2013,
      "step": 1650
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.785714285714287e-06,
      "loss": 0.1646,
      "step": 1675
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.734693877551021e-06,
      "loss": 0.1339,
      "step": 1700
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.683673469387756e-06,
      "loss": 0.1814,
      "step": 1725
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.63265306122449e-06,
      "loss": 0.1302,
      "step": 1750
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.581632653061225e-06,
      "loss": 0.1324,
      "step": 1775
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.530612244897959e-06,
      "loss": 0.1384,
      "step": 1800
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.4795918367346946e-06,
      "loss": 0.1628,
      "step": 1825
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.4285714285714295e-06,
      "loss": 0.1712,
      "step": 1850
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.3775510204081635e-06,
      "loss": 0.153,
      "step": 1875
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.326530612244899e-06,
      "loss": 0.1588,
      "step": 1900
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.275510204081633e-06,
      "loss": 0.1579,
      "step": 1925
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.224489795918368e-06,
      "loss": 0.2014,
      "step": 1950
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.173469387755102e-06,
      "loss": 0.153,
      "step": 1975
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.122448979591837e-06,
      "loss": 0.141,
      "step": 2000
    },
    {
      "epoch": 0.01,
      "eval_loss": 0.153132826089859,
      "eval_runtime": 14050.6821,
      "eval_samples_per_second": 3.472,
      "eval_steps_per_second": 0.434,
      "eval_wer": 33.639439546838894,
      "step": 2000
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.071428571428571e-06,
      "loss": 0.1496,
      "step": 2025
    },
    {
      "epoch": 0.01,
      "learning_rate": 6.020408163265307e-06,
      "loss": 0.1456,
      "step": 2050
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.969387755102042e-06,
      "loss": 0.1668,
      "step": 2075
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.918367346938776e-06,
      "loss": 0.1737,
      "step": 2100
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.867346938775511e-06,
      "loss": 0.1378,
      "step": 2125
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.816326530612246e-06,
      "loss": 0.159,
      "step": 2150
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.7653061224489805e-06,
      "loss": 0.1262,
      "step": 2175
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.1801,
      "step": 2200
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.663265306122449e-06,
      "loss": 0.1221,
      "step": 2225
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.6122448979591834e-06,
      "loss": 0.1261,
      "step": 2250
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.561224489795919e-06,
      "loss": 0.1584,
      "step": 2275
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.510204081632653e-06,
      "loss": 0.1534,
      "step": 2300
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.459183673469388e-06,
      "loss": 0.1572,
      "step": 2325
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.408163265306123e-06,
      "loss": 0.1091,
      "step": 2350
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.357142857142857e-06,
      "loss": 0.1719,
      "step": 2375
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.306122448979593e-06,
      "loss": 0.1817,
      "step": 2400
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.255102040816327e-06,
      "loss": 0.1407,
      "step": 2425
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.204081632653062e-06,
      "loss": 0.1418,
      "step": 2450
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.153061224489796e-06,
      "loss": 0.1489,
      "step": 2475
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.1020408163265315e-06,
      "loss": 0.1542,
      "step": 2500
    },
    {
      "epoch": 0.02,
      "learning_rate": 5.0510204081632655e-06,
      "loss": 0.1643,
      "step": 2525
    },
    {
      "epoch": 0.02,
      "learning_rate": 5e-06,
      "loss": 0.1275,
      "step": 2550
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.948979591836735e-06,
      "loss": 0.1409,
      "step": 2575
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.897959183673469e-06,
      "loss": 0.1463,
      "step": 2600
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.846938775510204e-06,
      "loss": 0.1329,
      "step": 2625
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.795918367346939e-06,
      "loss": 0.1499,
      "step": 2650
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.744897959183674e-06,
      "loss": 0.121,
      "step": 2675
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.693877551020409e-06,
      "loss": 0.1134,
      "step": 2700
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.642857142857144e-06,
      "loss": 0.1321,
      "step": 2725
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.591836734693878e-06,
      "loss": 0.1424,
      "step": 2750
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.540816326530613e-06,
      "loss": 0.1478,
      "step": 2775
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.489795918367348e-06,
      "loss": 0.1289,
      "step": 2800
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.438775510204082e-06,
      "loss": 0.1426,
      "step": 2825
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.3877551020408165e-06,
      "loss": 0.1541,
      "step": 2850
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.336734693877551e-06,
      "loss": 0.1242,
      "step": 2875
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.2857142857142855e-06,
      "loss": 0.1589,
      "step": 2900
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.234693877551021e-06,
      "loss": 0.1425,
      "step": 2925
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.183673469387755e-06,
      "loss": 0.1521,
      "step": 2950
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.13265306122449e-06,
      "loss": 0.1434,
      "step": 2975
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.081632653061225e-06,
      "loss": 0.1477,
      "step": 3000
    },
    {
      "epoch": 0.02,
      "eval_loss": 0.13808605074882507,
      "eval_runtime": 13604.3241,
      "eval_samples_per_second": 3.586,
      "eval_steps_per_second": 0.448,
      "eval_wer": 37.97294043601127,
      "step": 3000
    }
  ],
  "max_steps": 5000,
  "num_train_epochs": 1,
  "total_flos": 1.385209921536e+19,
  "trial_name": null,
  "trial_params": null
}