{
"best_metric": 0.8267716535433071,
"best_model_checkpoint": "videomae-base-finetuned-isl-numbers_2/checkpoint-3572",
"epoch": 49.02,
"eval_steps": 500,
"global_step": 3800,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002631578947368421,
"grad_norm": 8.72721004486084,
"learning_rate": 1.3157894736842106e-06,
"loss": 3.642,
"step": 10
},
{
"epoch": 0.005263157894736842,
"grad_norm": 11.387261390686035,
"learning_rate": 2.631578947368421e-06,
"loss": 3.6937,
"step": 20
},
{
"epoch": 0.007894736842105263,
"grad_norm": 9.664594650268555,
"learning_rate": 3.9473684210526315e-06,
"loss": 3.6305,
"step": 30
},
{
"epoch": 0.010526315789473684,
"grad_norm": 8.870984077453613,
"learning_rate": 5.263157894736842e-06,
"loss": 3.6148,
"step": 40
},
{
"epoch": 0.013157894736842105,
"grad_norm": 9.365774154663086,
"learning_rate": 6.578947368421053e-06,
"loss": 3.5805,
"step": 50
},
{
"epoch": 0.015789473684210527,
"grad_norm": 9.285212516784668,
"learning_rate": 7.894736842105263e-06,
"loss": 3.564,
"step": 60
},
{
"epoch": 0.018421052631578946,
"grad_norm": 8.437307357788086,
"learning_rate": 9.210526315789474e-06,
"loss": 3.5502,
"step": 70
},
{
"epoch": 0.02,
"eval_accuracy": 0.09448818897637795,
"eval_loss": 3.425055980682373,
"eval_runtime": 196.4771,
"eval_samples_per_second": 0.646,
"eval_steps_per_second": 0.081,
"step": 76
},
{
"epoch": 1.0010526315789474,
"grad_norm": 9.444938659667969,
"learning_rate": 1.0526315789473684e-05,
"loss": 3.4468,
"step": 80
},
{
"epoch": 1.0036842105263157,
"grad_norm": 9.038703918457031,
"learning_rate": 1.1842105263157895e-05,
"loss": 3.3166,
"step": 90
},
{
"epoch": 1.0063157894736843,
"grad_norm": 9.809856414794922,
"learning_rate": 1.3157894736842106e-05,
"loss": 3.3522,
"step": 100
},
{
"epoch": 1.0089473684210526,
"grad_norm": 9.944860458374023,
"learning_rate": 1.4473684210526317e-05,
"loss": 3.1998,
"step": 110
},
{
"epoch": 1.0115789473684211,
"grad_norm": 9.153931617736816,
"learning_rate": 1.5789473684210526e-05,
"loss": 3.175,
"step": 120
},
{
"epoch": 1.0142105263157895,
"grad_norm": 10.077743530273438,
"learning_rate": 1.7105263157894737e-05,
"loss": 3.1365,
"step": 130
},
{
"epoch": 1.016842105263158,
"grad_norm": 10.884031295776367,
"learning_rate": 1.8421052631578947e-05,
"loss": 3.1498,
"step": 140
},
{
"epoch": 1.0194736842105263,
"grad_norm": 10.6647310256958,
"learning_rate": 1.9736842105263158e-05,
"loss": 3.1152,
"step": 150
},
{
"epoch": 1.02,
"eval_accuracy": 0.2755905511811024,
"eval_loss": 3.0364396572113037,
"eval_runtime": 218.5017,
"eval_samples_per_second": 0.581,
"eval_steps_per_second": 0.073,
"step": 152
},
{
"epoch": 2.002105263157895,
"grad_norm": 9.977360725402832,
"learning_rate": 2.105263157894737e-05,
"loss": 2.8494,
"step": 160
},
{
"epoch": 2.004736842105263,
"grad_norm": 10.521862983703613,
"learning_rate": 2.236842105263158e-05,
"loss": 2.7613,
"step": 170
},
{
"epoch": 2.0073684210526315,
"grad_norm": 11.251272201538086,
"learning_rate": 2.368421052631579e-05,
"loss": 2.8185,
"step": 180
},
{
"epoch": 2.01,
"grad_norm": 11.353875160217285,
"learning_rate": 2.5e-05,
"loss": 2.9295,
"step": 190
},
{
"epoch": 2.0126315789473685,
"grad_norm": 13.587601661682129,
"learning_rate": 2.6315789473684212e-05,
"loss": 2.6847,
"step": 200
},
{
"epoch": 2.015263157894737,
"grad_norm": 9.29124641418457,
"learning_rate": 2.7631578947368426e-05,
"loss": 2.6595,
"step": 210
},
{
"epoch": 2.017894736842105,
"grad_norm": 10.696941375732422,
"learning_rate": 2.8947368421052634e-05,
"loss": 2.6365,
"step": 220
},
{
"epoch": 2.02,
"eval_accuracy": 0.3779527559055118,
"eval_loss": 2.619691848754883,
"eval_runtime": 215.3407,
"eval_samples_per_second": 0.59,
"eval_steps_per_second": 0.074,
"step": 228
},
{
"epoch": 3.0005263157894735,
"grad_norm": 10.942057609558105,
"learning_rate": 3.0263157894736844e-05,
"loss": 2.6872,
"step": 230
},
{
"epoch": 3.0031578947368422,
"grad_norm": 12.153741836547852,
"learning_rate": 3.157894736842105e-05,
"loss": 2.4375,
"step": 240
},
{
"epoch": 3.0057894736842106,
"grad_norm": 9.289186477661133,
"learning_rate": 3.289473684210527e-05,
"loss": 2.2951,
"step": 250
},
{
"epoch": 3.008421052631579,
"grad_norm": 11.481297492980957,
"learning_rate": 3.421052631578947e-05,
"loss": 2.3551,
"step": 260
},
{
"epoch": 3.011052631578947,
"grad_norm": 14.849848747253418,
"learning_rate": 3.5526315789473684e-05,
"loss": 2.4047,
"step": 270
},
{
"epoch": 3.013684210526316,
"grad_norm": 12.359841346740723,
"learning_rate": 3.6842105263157895e-05,
"loss": 2.3038,
"step": 280
},
{
"epoch": 3.0163157894736843,
"grad_norm": 12.507572174072266,
"learning_rate": 3.815789473684211e-05,
"loss": 2.3412,
"step": 290
},
{
"epoch": 3.0189473684210526,
"grad_norm": 11.021321296691895,
"learning_rate": 3.9473684210526316e-05,
"loss": 2.3879,
"step": 300
},
{
"epoch": 3.02,
"eval_accuracy": 0.4645669291338583,
"eval_loss": 2.1518962383270264,
"eval_runtime": 218.0756,
"eval_samples_per_second": 0.582,
"eval_steps_per_second": 0.073,
"step": 304
},
{
"epoch": 4.001578947368421,
"grad_norm": 10.985835075378418,
"learning_rate": 4.078947368421053e-05,
"loss": 2.081,
"step": 310
},
{
"epoch": 4.00421052631579,
"grad_norm": 11.158766746520996,
"learning_rate": 4.210526315789474e-05,
"loss": 2.0336,
"step": 320
},
{
"epoch": 4.006842105263158,
"grad_norm": 18.02586555480957,
"learning_rate": 4.342105263157895e-05,
"loss": 1.9907,
"step": 330
},
{
"epoch": 4.009473684210526,
"grad_norm": 9.385698318481445,
"learning_rate": 4.473684210526316e-05,
"loss": 2.1798,
"step": 340
},
{
"epoch": 4.012105263157895,
"grad_norm": 13.736913681030273,
"learning_rate": 4.605263157894737e-05,
"loss": 1.8809,
"step": 350
},
{
"epoch": 4.014736842105263,
"grad_norm": 13.594823837280273,
"learning_rate": 4.736842105263158e-05,
"loss": 1.9144,
"step": 360
},
{
"epoch": 4.017368421052631,
"grad_norm": 17.99551773071289,
"learning_rate": 4.868421052631579e-05,
"loss": 2.0676,
"step": 370
},
{
"epoch": 4.02,
"grad_norm": 14.397540092468262,
"learning_rate": 5e-05,
"loss": 1.9396,
"step": 380
},
{
"epoch": 4.02,
"eval_accuracy": 0.41732283464566927,
"eval_loss": 2.080385684967041,
"eval_runtime": 207.8742,
"eval_samples_per_second": 0.611,
"eval_steps_per_second": 0.077,
"step": 380
},
{
"epoch": 5.002631578947368,
"grad_norm": 15.16203498840332,
"learning_rate": 4.985380116959065e-05,
"loss": 1.7725,
"step": 390
},
{
"epoch": 5.005263157894737,
"grad_norm": 17.93940544128418,
"learning_rate": 4.970760233918128e-05,
"loss": 1.7235,
"step": 400
},
{
"epoch": 5.007894736842105,
"grad_norm": 16.616912841796875,
"learning_rate": 4.956140350877193e-05,
"loss": 1.877,
"step": 410
},
{
"epoch": 5.010526315789473,
"grad_norm": 12.440061569213867,
"learning_rate": 4.941520467836258e-05,
"loss": 1.6128,
"step": 420
},
{
"epoch": 5.0131578947368425,
"grad_norm": 18.26654815673828,
"learning_rate": 4.926900584795322e-05,
"loss": 1.8575,
"step": 430
},
{
"epoch": 5.015789473684211,
"grad_norm": 17.84028434753418,
"learning_rate": 4.912280701754386e-05,
"loss": 1.8118,
"step": 440
},
{
"epoch": 5.018421052631579,
"grad_norm": 15.778058052062988,
"learning_rate": 4.8976608187134504e-05,
"loss": 1.9285,
"step": 450
},
{
"epoch": 5.02,
"eval_accuracy": 0.44881889763779526,
"eval_loss": 1.9335325956344604,
"eval_runtime": 213.1562,
"eval_samples_per_second": 0.596,
"eval_steps_per_second": 0.075,
"step": 456
},
{
"epoch": 6.001052631578947,
"grad_norm": 12.914003372192383,
"learning_rate": 4.883040935672515e-05,
"loss": 1.621,
"step": 460
},
{
"epoch": 6.003684210526316,
"grad_norm": 12.375211715698242,
"learning_rate": 4.868421052631579e-05,
"loss": 1.5505,
"step": 470
},
{
"epoch": 6.0063157894736845,
"grad_norm": 17.16912841796875,
"learning_rate": 4.853801169590643e-05,
"loss": 1.5443,
"step": 480
},
{
"epoch": 6.008947368421053,
"grad_norm": 15.248703002929688,
"learning_rate": 4.839181286549708e-05,
"loss": 1.5193,
"step": 490
},
{
"epoch": 6.011578947368421,
"grad_norm": 10.187657356262207,
"learning_rate": 4.824561403508772e-05,
"loss": 1.4412,
"step": 500
},
{
"epoch": 6.0142105263157895,
"grad_norm": 8.52863883972168,
"learning_rate": 4.8099415204678366e-05,
"loss": 1.5114,
"step": 510
},
{
"epoch": 6.016842105263158,
"grad_norm": 10.42663288116455,
"learning_rate": 4.7953216374269006e-05,
"loss": 1.4669,
"step": 520
},
{
"epoch": 6.019473684210526,
"grad_norm": 13.066956520080566,
"learning_rate": 4.780701754385965e-05,
"loss": 1.5843,
"step": 530
},
{
"epoch": 6.02,
"eval_accuracy": 0.48031496062992124,
"eval_loss": 1.790671706199646,
"eval_runtime": 218.3253,
"eval_samples_per_second": 0.582,
"eval_steps_per_second": 0.073,
"step": 532
},
{
"epoch": 7.002105263157895,
"grad_norm": 14.293023109436035,
"learning_rate": 4.7660818713450294e-05,
"loss": 1.4318,
"step": 540
},
{
"epoch": 7.004736842105263,
"grad_norm": 17.056535720825195,
"learning_rate": 4.751461988304094e-05,
"loss": 1.3673,
"step": 550
},
{
"epoch": 7.0073684210526315,
"grad_norm": 15.318123817443848,
"learning_rate": 4.736842105263158e-05,
"loss": 1.3918,
"step": 560
},
{
"epoch": 7.01,
"grad_norm": 10.944533348083496,
"learning_rate": 4.722222222222222e-05,
"loss": 1.4558,
"step": 570
},
{
"epoch": 7.012631578947368,
"grad_norm": 15.36630916595459,
"learning_rate": 4.707602339181287e-05,
"loss": 1.2652,
"step": 580
},
{
"epoch": 7.015263157894736,
"grad_norm": 19.23952865600586,
"learning_rate": 4.6929824561403515e-05,
"loss": 1.3143,
"step": 590
},
{
"epoch": 7.017894736842106,
"grad_norm": 16.251291275024414,
"learning_rate": 4.678362573099415e-05,
"loss": 1.2387,
"step": 600
},
{
"epoch": 7.02,
"eval_accuracy": 0.3858267716535433,
"eval_loss": 1.8962125778198242,
"eval_runtime": 214.1972,
"eval_samples_per_second": 0.593,
"eval_steps_per_second": 0.075,
"step": 608
},
{
"epoch": 8.000526315789473,
"grad_norm": 15.009963035583496,
"learning_rate": 4.6637426900584796e-05,
"loss": 1.198,
"step": 610
},
{
"epoch": 8.003157894736843,
"grad_norm": 10.402515411376953,
"learning_rate": 4.649122807017544e-05,
"loss": 1.2153,
"step": 620
},
{
"epoch": 8.00578947368421,
"grad_norm": 15.393932342529297,
"learning_rate": 4.634502923976608e-05,
"loss": 1.1167,
"step": 630
},
{
"epoch": 8.00842105263158,
"grad_norm": 6.643280506134033,
"learning_rate": 4.619883040935672e-05,
"loss": 1.2265,
"step": 640
},
{
"epoch": 8.011052631578947,
"grad_norm": 9.686060905456543,
"learning_rate": 4.605263157894737e-05,
"loss": 1.1322,
"step": 650
},
{
"epoch": 8.013684210526316,
"grad_norm": 15.071520805358887,
"learning_rate": 4.590643274853802e-05,
"loss": 0.9386,
"step": 660
},
{
"epoch": 8.016315789473683,
"grad_norm": 13.912795066833496,
"learning_rate": 4.576023391812866e-05,
"loss": 1.0938,
"step": 670
},
{
"epoch": 8.018947368421053,
"grad_norm": 19.317672729492188,
"learning_rate": 4.56140350877193e-05,
"loss": 1.2578,
"step": 680
},
{
"epoch": 8.02,
"eval_accuracy": 0.44881889763779526,
"eval_loss": 1.7191396951675415,
"eval_runtime": 221.1734,
"eval_samples_per_second": 0.574,
"eval_steps_per_second": 0.072,
"step": 684
},
{
"epoch": 9.00157894736842,
"grad_norm": 12.921488761901855,
"learning_rate": 4.5467836257309945e-05,
"loss": 1.0928,
"step": 690
},
{
"epoch": 9.00421052631579,
"grad_norm": 12.777690887451172,
"learning_rate": 4.5321637426900585e-05,
"loss": 1.1085,
"step": 700
},
{
"epoch": 9.006842105263157,
"grad_norm": 13.301621437072754,
"learning_rate": 4.517543859649123e-05,
"loss": 1.2055,
"step": 710
},
{
"epoch": 9.009473684210526,
"grad_norm": 11.155688285827637,
"learning_rate": 4.502923976608187e-05,
"loss": 1.0145,
"step": 720
},
{
"epoch": 9.012105263157896,
"grad_norm": 19.05325698852539,
"learning_rate": 4.488304093567251e-05,
"loss": 1.0998,
"step": 730
},
{
"epoch": 9.014736842105263,
"grad_norm": 14.210393905639648,
"learning_rate": 4.473684210526316e-05,
"loss": 0.8998,
"step": 740
},
{
"epoch": 9.017368421052632,
"grad_norm": 20.1743106842041,
"learning_rate": 4.4590643274853806e-05,
"loss": 1.1693,
"step": 750
},
{
"epoch": 9.02,
"grad_norm": 17.429399490356445,
"learning_rate": 4.4444444444444447e-05,
"loss": 0.9611,
"step": 760
},
{
"epoch": 9.02,
"eval_accuracy": 0.4881889763779528,
"eval_loss": 1.7361595630645752,
"eval_runtime": 221.2054,
"eval_samples_per_second": 0.574,
"eval_steps_per_second": 0.072,
"step": 760
},
{
"epoch": 10.00263157894737,
"grad_norm": 6.412835597991943,
"learning_rate": 4.429824561403509e-05,
"loss": 0.8146,
"step": 770
},
{
"epoch": 10.005263157894737,
"grad_norm": 7.542128562927246,
"learning_rate": 4.4152046783625734e-05,
"loss": 0.8144,
"step": 780
},
{
"epoch": 10.007894736842106,
"grad_norm": 9.76062297821045,
"learning_rate": 4.400584795321638e-05,
"loss": 0.8581,
"step": 790
},
{
"epoch": 10.010526315789473,
"grad_norm": 15.923550605773926,
"learning_rate": 4.3859649122807014e-05,
"loss": 0.9352,
"step": 800
},
{
"epoch": 10.013157894736842,
"grad_norm": 15.5842924118042,
"learning_rate": 4.371345029239766e-05,
"loss": 0.8613,
"step": 810
},
{
"epoch": 10.01578947368421,
"grad_norm": 14.166058540344238,
"learning_rate": 4.356725146198831e-05,
"loss": 0.9478,
"step": 820
},
{
"epoch": 10.01842105263158,
"grad_norm": 15.743328094482422,
"learning_rate": 4.342105263157895e-05,
"loss": 0.9247,
"step": 830
},
{
"epoch": 10.02,
"eval_accuracy": 0.5905511811023622,
"eval_loss": 1.3898028135299683,
"eval_runtime": 219.3829,
"eval_samples_per_second": 0.579,
"eval_steps_per_second": 0.073,
"step": 836
},
{
"epoch": 11.001052631578947,
"grad_norm": 15.066838264465332,
"learning_rate": 4.327485380116959e-05,
"loss": 1.0099,
"step": 840
},
{
"epoch": 11.003684210526316,
"grad_norm": 22.22589683532715,
"learning_rate": 4.3128654970760236e-05,
"loss": 0.9153,
"step": 850
},
{
"epoch": 11.006315789473684,
"grad_norm": 12.97102165222168,
"learning_rate": 4.298245614035088e-05,
"loss": 0.7456,
"step": 860
},
{
"epoch": 11.008947368421053,
"grad_norm": 15.3944730758667,
"learning_rate": 4.283625730994152e-05,
"loss": 0.6651,
"step": 870
},
{
"epoch": 11.01157894736842,
"grad_norm": 18.06490707397461,
"learning_rate": 4.269005847953216e-05,
"loss": 0.9046,
"step": 880
},
{
"epoch": 11.01421052631579,
"grad_norm": 21.835529327392578,
"learning_rate": 4.254385964912281e-05,
"loss": 0.9137,
"step": 890
},
{
"epoch": 11.016842105263159,
"grad_norm": 8.073750495910645,
"learning_rate": 4.239766081871345e-05,
"loss": 0.9701,
"step": 900
},
{
"epoch": 11.019473684210526,
"grad_norm": 13.15185260772705,
"learning_rate": 4.22514619883041e-05,
"loss": 0.8107,
"step": 910
},
{
"epoch": 11.02,
"eval_accuracy": 0.4094488188976378,
"eval_loss": 1.9588265419006348,
"eval_runtime": 217.239,
"eval_samples_per_second": 0.585,
"eval_steps_per_second": 0.074,
"step": 912
},
{
"epoch": 12.002105263157894,
"grad_norm": 13.381047248840332,
"learning_rate": 4.210526315789474e-05,
"loss": 0.5541,
"step": 920
},
{
"epoch": 12.004736842105263,
"grad_norm": 14.908097267150879,
"learning_rate": 4.195906432748538e-05,
"loss": 0.9128,
"step": 930
},
{
"epoch": 12.007368421052632,
"grad_norm": 13.663619995117188,
"learning_rate": 4.1812865497076025e-05,
"loss": 0.8758,
"step": 940
},
{
"epoch": 12.01,
"grad_norm": 17.63921546936035,
"learning_rate": 4.166666666666667e-05,
"loss": 0.6901,
"step": 950
},
{
"epoch": 12.012631578947369,
"grad_norm": 15.174348831176758,
"learning_rate": 4.152046783625731e-05,
"loss": 0.6394,
"step": 960
},
{
"epoch": 12.015263157894736,
"grad_norm": 11.022780418395996,
"learning_rate": 4.137426900584795e-05,
"loss": 0.7149,
"step": 970
},
{
"epoch": 12.017894736842106,
"grad_norm": 13.170819282531738,
"learning_rate": 4.12280701754386e-05,
"loss": 0.7618,
"step": 980
},
{
"epoch": 12.02,
"eval_accuracy": 0.6614173228346457,
"eval_loss": 1.141591191291809,
"eval_runtime": 213.5376,
"eval_samples_per_second": 0.595,
"eval_steps_per_second": 0.075,
"step": 988
},
{
"epoch": 13.000526315789473,
"grad_norm": 12.721129417419434,
"learning_rate": 4.1081871345029247e-05,
"loss": 0.6057,
"step": 990
},
{
"epoch": 13.003157894736843,
"grad_norm": 13.658514976501465,
"learning_rate": 4.093567251461988e-05,
"loss": 0.7183,
"step": 1000
},
{
"epoch": 13.00578947368421,
"grad_norm": 14.460805892944336,
"learning_rate": 4.078947368421053e-05,
"loss": 0.6447,
"step": 1010
},
{
"epoch": 13.00842105263158,
"grad_norm": 12.585667610168457,
"learning_rate": 4.0643274853801174e-05,
"loss": 0.7182,
"step": 1020
},
{
"epoch": 13.011052631578947,
"grad_norm": 12.871528625488281,
"learning_rate": 4.0497076023391814e-05,
"loss": 0.7526,
"step": 1030
},
{
"epoch": 13.013684210526316,
"grad_norm": 26.28831672668457,
"learning_rate": 4.0350877192982455e-05,
"loss": 0.8322,
"step": 1040
},
{
"epoch": 13.016315789473683,
"grad_norm": 9.93556022644043,
"learning_rate": 4.02046783625731e-05,
"loss": 0.712,
"step": 1050
},
{
"epoch": 13.018947368421053,
"grad_norm": 17.637866973876953,
"learning_rate": 4.005847953216375e-05,
"loss": 0.7083,
"step": 1060
},
{
"epoch": 13.02,
"eval_accuracy": 0.6614173228346457,
"eval_loss": 1.281165599822998,
"eval_runtime": 215.2472,
"eval_samples_per_second": 0.59,
"eval_steps_per_second": 0.074,
"step": 1064
},
{
"epoch": 14.00157894736842,
"grad_norm": 5.20871114730835,
"learning_rate": 3.991228070175439e-05,
"loss": 0.6391,
"step": 1070
},
{
"epoch": 14.00421052631579,
"grad_norm": 9.707756996154785,
"learning_rate": 3.976608187134503e-05,
"loss": 0.6656,
"step": 1080
},
{
"epoch": 14.006842105263157,
"grad_norm": 9.207053184509277,
"learning_rate": 3.9619883040935676e-05,
"loss": 0.522,
"step": 1090
},
{
"epoch": 14.009473684210526,
"grad_norm": 10.508357048034668,
"learning_rate": 3.9473684210526316e-05,
"loss": 0.5712,
"step": 1100
},
{
"epoch": 14.012105263157896,
"grad_norm": 9.813148498535156,
"learning_rate": 3.932748538011696e-05,
"loss": 0.5209,
"step": 1110
},
{
"epoch": 14.014736842105263,
"grad_norm": 23.12822723388672,
"learning_rate": 3.9181286549707604e-05,
"loss": 0.6701,
"step": 1120
},
{
"epoch": 14.017368421052632,
"grad_norm": 16.006336212158203,
"learning_rate": 3.9035087719298244e-05,
"loss": 0.6569,
"step": 1130
},
{
"epoch": 14.02,
"grad_norm": 15.42993450164795,
"learning_rate": 3.888888888888889e-05,
"loss": 0.7098,
"step": 1140
},
{
"epoch": 14.02,
"eval_accuracy": 0.5196850393700787,
"eval_loss": 1.4601401090621948,
"eval_runtime": 222.0416,
"eval_samples_per_second": 0.572,
"eval_steps_per_second": 0.072,
"step": 1140
},
{
"epoch": 15.00263157894737,
"grad_norm": 12.818732261657715,
"learning_rate": 3.874269005847954e-05,
"loss": 0.4586,
"step": 1150
},
{
"epoch": 15.005263157894737,
"grad_norm": 18.07966423034668,
"learning_rate": 3.859649122807018e-05,
"loss": 0.6118,
"step": 1160
},
{
"epoch": 15.007894736842106,
"grad_norm": 10.6485013961792,
"learning_rate": 3.845029239766082e-05,
"loss": 0.5496,
"step": 1170
},
{
"epoch": 15.010526315789473,
"grad_norm": 10.82335376739502,
"learning_rate": 3.8304093567251465e-05,
"loss": 0.5104,
"step": 1180
},
{
"epoch": 15.013157894736842,
"grad_norm": 16.787870407104492,
"learning_rate": 3.815789473684211e-05,
"loss": 0.5459,
"step": 1190
},
{
"epoch": 15.01578947368421,
"grad_norm": 25.27979850769043,
"learning_rate": 3.8011695906432746e-05,
"loss": 0.7882,
"step": 1200
},
{
"epoch": 15.01842105263158,
"grad_norm": 6.820335865020752,
"learning_rate": 3.786549707602339e-05,
"loss": 0.4601,
"step": 1210
},
{
"epoch": 15.02,
"eval_accuracy": 0.6692913385826772,
"eval_loss": 1.1275523900985718,
"eval_runtime": 223.6984,
"eval_samples_per_second": 0.568,
"eval_steps_per_second": 0.072,
"step": 1216
},
{
"epoch": 16.001052631578947,
"grad_norm": 20.168317794799805,
"learning_rate": 3.771929824561404e-05,
"loss": 0.576,
"step": 1220
},
{
"epoch": 16.003684210526316,
"grad_norm": 14.202433586120605,
"learning_rate": 3.757309941520468e-05,
"loss": 0.4841,
"step": 1230
},
{
"epoch": 16.006315789473685,
"grad_norm": 17.228273391723633,
"learning_rate": 3.742690058479532e-05,
"loss": 0.4666,
"step": 1240
},
{
"epoch": 16.00894736842105,
"grad_norm": 14.974773406982422,
"learning_rate": 3.728070175438597e-05,
"loss": 0.6221,
"step": 1250
},
{
"epoch": 16.01157894736842,
"grad_norm": 18.37944793701172,
"learning_rate": 3.713450292397661e-05,
"loss": 0.4969,
"step": 1260
},
{
"epoch": 16.01421052631579,
"grad_norm": 11.213744163513184,
"learning_rate": 3.6988304093567254e-05,
"loss": 0.3893,
"step": 1270
},
{
"epoch": 16.01684210526316,
"grad_norm": 15.736601829528809,
"learning_rate": 3.6842105263157895e-05,
"loss": 0.5969,
"step": 1280
},
{
"epoch": 16.019473684210528,
"grad_norm": 21.78984260559082,
"learning_rate": 3.669590643274854e-05,
"loss": 0.5684,
"step": 1290
},
{
"epoch": 16.02,
"eval_accuracy": 0.5590551181102362,
"eval_loss": 1.479176640510559,
"eval_runtime": 210.4765,
"eval_samples_per_second": 0.603,
"eval_steps_per_second": 0.076,
"step": 1292
},
{
"epoch": 17.002105263157894,
"grad_norm": 18.97657585144043,
"learning_rate": 3.654970760233918e-05,
"loss": 0.4405,
"step": 1300
},
{
"epoch": 17.004736842105263,
"grad_norm": 16.370620727539062,
"learning_rate": 3.640350877192983e-05,
"loss": 0.4933,
"step": 1310
},
{
"epoch": 17.007368421052632,
"grad_norm": 18.727680206298828,
"learning_rate": 3.625730994152047e-05,
"loss": 0.7038,
"step": 1320
},
{
"epoch": 17.01,
"grad_norm": 11.238962173461914,
"learning_rate": 3.611111111111111e-05,
"loss": 0.4704,
"step": 1330
},
{
"epoch": 17.012631578947367,
"grad_norm": 8.99775218963623,
"learning_rate": 3.5964912280701756e-05,
"loss": 0.4999,
"step": 1340
},
{
"epoch": 17.015263157894736,
"grad_norm": 4.336584091186523,
"learning_rate": 3.5818713450292403e-05,
"loss": 0.4195,
"step": 1350
},
{
"epoch": 17.017894736842106,
"grad_norm": 31.79267120361328,
"learning_rate": 3.5672514619883044e-05,
"loss": 0.5044,
"step": 1360
},
{
"epoch": 17.02,
"eval_accuracy": 0.6614173228346457,
"eval_loss": 1.1235504150390625,
"eval_runtime": 226.3183,
"eval_samples_per_second": 0.561,
"eval_steps_per_second": 0.071,
"step": 1368
},
{
"epoch": 18.000526315789475,
"grad_norm": 5.108359336853027,
"learning_rate": 3.5526315789473684e-05,
"loss": 0.4842,
"step": 1370
},
{
"epoch": 18.00315789473684,
"grad_norm": 6.3470354080200195,
"learning_rate": 3.538011695906433e-05,
"loss": 0.3885,
"step": 1380
},
{
"epoch": 18.00578947368421,
"grad_norm": 14.960538864135742,
"learning_rate": 3.523391812865498e-05,
"loss": 0.4218,
"step": 1390
},
{
"epoch": 18.00842105263158,
"grad_norm": 3.5540506839752197,
"learning_rate": 3.508771929824561e-05,
"loss": 0.5639,
"step": 1400
},
{
"epoch": 18.01105263157895,
"grad_norm": 8.745479583740234,
"learning_rate": 3.494152046783626e-05,
"loss": 0.2593,
"step": 1410
},
{
"epoch": 18.013684210526314,
"grad_norm": 16.45661735534668,
"learning_rate": 3.4795321637426905e-05,
"loss": 0.5067,
"step": 1420
},
{
"epoch": 18.016315789473683,
"grad_norm": 29.834877014160156,
"learning_rate": 3.4649122807017546e-05,
"loss": 0.4747,
"step": 1430
},
{
"epoch": 18.018947368421053,
"grad_norm": 3.380971670150757,
"learning_rate": 3.4502923976608186e-05,
"loss": 0.4551,
"step": 1440
},
{
"epoch": 18.02,
"eval_accuracy": 0.6062992125984252,
"eval_loss": 1.3894457817077637,
"eval_runtime": 208.6422,
"eval_samples_per_second": 0.609,
"eval_steps_per_second": 0.077,
"step": 1444
},
{
"epoch": 19.001578947368422,
"grad_norm": 17.57008171081543,
"learning_rate": 3.435672514619883e-05,
"loss": 0.4354,
"step": 1450
},
{
"epoch": 19.004210526315788,
"grad_norm": 16.11405372619629,
"learning_rate": 3.421052631578947e-05,
"loss": 0.4899,
"step": 1460
},
{
"epoch": 19.006842105263157,
"grad_norm": 11.66887092590332,
"learning_rate": 3.406432748538012e-05,
"loss": 0.4279,
"step": 1470
},
{
"epoch": 19.009473684210526,
"grad_norm": 13.749747276306152,
"learning_rate": 3.391812865497076e-05,
"loss": 0.39,
"step": 1480
},
{
"epoch": 19.012105263157896,
"grad_norm": 22.861486434936523,
"learning_rate": 3.377192982456141e-05,
"loss": 0.3361,
"step": 1490
},
{
"epoch": 19.014736842105265,
"grad_norm": 19.228759765625,
"learning_rate": 3.362573099415205e-05,
"loss": 0.3906,
"step": 1500
},
{
"epoch": 19.01736842105263,
"grad_norm": 21.020246505737305,
"learning_rate": 3.3479532163742695e-05,
"loss": 0.5774,
"step": 1510
},
{
"epoch": 19.02,
"grad_norm": 9.788912773132324,
"learning_rate": 3.3333333333333335e-05,
"loss": 0.3488,
"step": 1520
},
{
"epoch": 19.02,
"eval_accuracy": 0.6614173228346457,
"eval_loss": 1.2918061017990112,
"eval_runtime": 222.2729,
"eval_samples_per_second": 0.571,
"eval_steps_per_second": 0.072,
"step": 1520
},
{
"epoch": 20.00263157894737,
"grad_norm": 15.891140937805176,
"learning_rate": 3.3187134502923975e-05,
"loss": 0.2902,
"step": 1530
},
{
"epoch": 20.00526315789474,
"grad_norm": 5.71783971786499,
"learning_rate": 3.304093567251462e-05,
"loss": 0.2562,
"step": 1540
},
{
"epoch": 20.007894736842104,
"grad_norm": 5.522655487060547,
"learning_rate": 3.289473684210527e-05,
"loss": 0.3025,
"step": 1550
},
{
"epoch": 20.010526315789473,
"grad_norm": 4.9579691886901855,
"learning_rate": 3.274853801169591e-05,
"loss": 0.1642,
"step": 1560
},
{
"epoch": 20.013157894736842,
"grad_norm": 15.316770553588867,
"learning_rate": 3.260233918128655e-05,
"loss": 0.4431,
"step": 1570
},
{
"epoch": 20.01578947368421,
"grad_norm": 2.228877067565918,
"learning_rate": 3.24561403508772e-05,
"loss": 0.3193,
"step": 1580
},
{
"epoch": 20.018421052631577,
"grad_norm": 9.935259819030762,
"learning_rate": 3.230994152046784e-05,
"loss": 0.4711,
"step": 1590
},
{
"epoch": 20.02,
"eval_accuracy": 0.6299212598425197,
"eval_loss": 1.2510179281234741,
"eval_runtime": 212.9147,
"eval_samples_per_second": 0.596,
"eval_steps_per_second": 0.075,
"step": 1596
},
{
"epoch": 21.001052631578947,
"grad_norm": 15.269537925720215,
"learning_rate": 3.216374269005848e-05,
"loss": 0.2989,
"step": 1600
},
{
"epoch": 21.003684210526316,
"grad_norm": 11.242606163024902,
"learning_rate": 3.2017543859649124e-05,
"loss": 0.3384,
"step": 1610
},
{
"epoch": 21.006315789473685,
"grad_norm": 14.554858207702637,
"learning_rate": 3.187134502923977e-05,
"loss": 0.305,
"step": 1620
},
{
"epoch": 21.00894736842105,
"grad_norm": 3.1583755016326904,
"learning_rate": 3.172514619883041e-05,
"loss": 0.1487,
"step": 1630
},
{
"epoch": 21.01157894736842,
"grad_norm": 30.419754028320312,
"learning_rate": 3.157894736842105e-05,
"loss": 0.4566,
"step": 1640
},
{
"epoch": 21.01421052631579,
"grad_norm": 7.953301429748535,
"learning_rate": 3.14327485380117e-05,
"loss": 0.4296,
"step": 1650
},
{
"epoch": 21.01684210526316,
"grad_norm": 19.097740173339844,
"learning_rate": 3.128654970760234e-05,
"loss": 0.3578,
"step": 1660
},
{
"epoch": 21.019473684210528,
"grad_norm": 23.819265365600586,
"learning_rate": 3.1140350877192986e-05,
"loss": 0.3451,
"step": 1670
},
{
"epoch": 21.02,
"eval_accuracy": 0.6692913385826772,
"eval_loss": 1.126503586769104,
"eval_runtime": 207.0446,
"eval_samples_per_second": 0.613,
"eval_steps_per_second": 0.077,
"step": 1672
},
{
"epoch": 22.002105263157894,
"grad_norm": 1.5578737258911133,
"learning_rate": 3.0994152046783626e-05,
"loss": 0.3176,
"step": 1680
},
{
"epoch": 22.004736842105263,
"grad_norm": 1.8389073610305786,
"learning_rate": 3.084795321637427e-05,
"loss": 0.2775,
"step": 1690
},
{
"epoch": 22.007368421052632,
"grad_norm": 16.95042610168457,
"learning_rate": 3.0701754385964913e-05,
"loss": 0.2394,
"step": 1700
},
{
"epoch": 22.01,
"grad_norm": 5.184008598327637,
"learning_rate": 3.055555555555556e-05,
"loss": 0.4281,
"step": 1710
},
{
"epoch": 22.012631578947367,
"grad_norm": 13.091333389282227,
"learning_rate": 3.0409356725146197e-05,
"loss": 0.3197,
"step": 1720
},
{
"epoch": 22.015263157894736,
"grad_norm": 18.41486358642578,
"learning_rate": 3.0263157894736844e-05,
"loss": 0.3218,
"step": 1730
},
{
"epoch": 22.017894736842106,
"grad_norm": 23.512739181518555,
"learning_rate": 3.0116959064327488e-05,
"loss": 0.394,
"step": 1740
},
{
"epoch": 22.02,
"eval_accuracy": 0.6377952755905512,
"eval_loss": 1.167567253112793,
"eval_runtime": 217.2249,
"eval_samples_per_second": 0.585,
"eval_steps_per_second": 0.074,
"step": 1748
},
{
"epoch": 23.000526315789475,
"grad_norm": 3.652381181716919,
"learning_rate": 2.997076023391813e-05,
"loss": 0.3088,
"step": 1750
},
{
"epoch": 23.00315789473684,
"grad_norm": 9.2783203125,
"learning_rate": 2.9824561403508772e-05,
"loss": 0.2488,
"step": 1760
},
{
"epoch": 23.00578947368421,
"grad_norm": 4.100778579711914,
"learning_rate": 2.9678362573099415e-05,
"loss": 0.1505,
"step": 1770
},
{
"epoch": 23.00842105263158,
"grad_norm": 11.44543170928955,
"learning_rate": 2.9532163742690062e-05,
"loss": 0.392,
"step": 1780
},
{
"epoch": 23.01105263157895,
"grad_norm": 1.7498180866241455,
"learning_rate": 2.9385964912280706e-05,
"loss": 0.264,
"step": 1790
},
{
"epoch": 23.013684210526314,
"grad_norm": 16.737688064575195,
"learning_rate": 2.9239766081871346e-05,
"loss": 0.1904,
"step": 1800
},
{
"epoch": 23.016315789473683,
"grad_norm": 3.4222452640533447,
"learning_rate": 2.909356725146199e-05,
"loss": 0.3417,
"step": 1810
},
{
"epoch": 23.018947368421053,
"grad_norm": 21.721633911132812,
"learning_rate": 2.8947368421052634e-05,
"loss": 0.234,
"step": 1820
},
{
"epoch": 23.02,
"eval_accuracy": 0.7086614173228346,
"eval_loss": 1.0714032649993896,
"eval_runtime": 218.1648,
"eval_samples_per_second": 0.582,
"eval_steps_per_second": 0.073,
"step": 1824
},
{
"epoch": 24.001578947368422,
"grad_norm": 27.658559799194336,
"learning_rate": 2.8801169590643277e-05,
"loss": 0.3998,
"step": 1830
},
{
"epoch": 24.004210526315788,
"grad_norm": 9.68593692779541,
"learning_rate": 2.8654970760233917e-05,
"loss": 0.156,
"step": 1840
},
{
"epoch": 24.006842105263157,
"grad_norm": 26.603404998779297,
"learning_rate": 2.850877192982456e-05,
"loss": 0.4107,
"step": 1850
},
{
"epoch": 24.009473684210526,
"grad_norm": 15.769572257995605,
"learning_rate": 2.8362573099415208e-05,
"loss": 0.2049,
"step": 1860
},
{
"epoch": 24.012105263157896,
"grad_norm": 13.503707885742188,
"learning_rate": 2.821637426900585e-05,
"loss": 0.278,
"step": 1870
},
{
"epoch": 24.014736842105265,
"grad_norm": 12.523197174072266,
"learning_rate": 2.8070175438596492e-05,
"loss": 0.2822,
"step": 1880
},
{
"epoch": 24.01736842105263,
"grad_norm": 17.988203048706055,
"learning_rate": 2.7923976608187135e-05,
"loss": 0.2812,
"step": 1890
},
{
"epoch": 24.02,
"grad_norm": 26.06406593322754,
"learning_rate": 2.777777777777778e-05,
"loss": 0.2318,
"step": 1900
},
{
"epoch": 24.02,
"eval_accuracy": 0.6377952755905512,
"eval_loss": 1.2646870613098145,
"eval_runtime": 209.6259,
"eval_samples_per_second": 0.606,
"eval_steps_per_second": 0.076,
"step": 1900
},
{
"epoch": 25.00263157894737,
"grad_norm": 1.2127106189727783,
"learning_rate": 2.7631578947368426e-05,
"loss": 0.2181,
"step": 1910
},
{
"epoch": 25.00526315789474,
"grad_norm": 11.234424591064453,
"learning_rate": 2.7485380116959063e-05,
"loss": 0.3224,
"step": 1920
},
{
"epoch": 25.007894736842104,
"grad_norm": 5.068539619445801,
"learning_rate": 2.733918128654971e-05,
"loss": 0.2047,
"step": 1930
},
{
"epoch": 25.010526315789473,
"grad_norm": 22.778417587280273,
"learning_rate": 2.7192982456140354e-05,
"loss": 0.2739,
"step": 1940
},
{
"epoch": 25.013157894736842,
"grad_norm": 9.123680114746094,
"learning_rate": 2.7046783625730997e-05,
"loss": 0.2422,
"step": 1950
},
{
"epoch": 25.01578947368421,
"grad_norm": 10.594013214111328,
"learning_rate": 2.6900584795321637e-05,
"loss": 0.3974,
"step": 1960
},
{
"epoch": 25.018421052631577,
"grad_norm": 17.55132293701172,
"learning_rate": 2.675438596491228e-05,
"loss": 0.4294,
"step": 1970
},
{
"epoch": 25.02,
"eval_accuracy": 0.7480314960629921,
"eval_loss": 1.0250136852264404,
"eval_runtime": 226.4979,
"eval_samples_per_second": 0.561,
"eval_steps_per_second": 0.071,
"step": 1976
},
{
"epoch": 26.001052631578947,
"grad_norm": 8.826836585998535,
"learning_rate": 2.6608187134502928e-05,
"loss": 0.1744,
"step": 1980
},
{
"epoch": 26.003684210526316,
"grad_norm": 2.1957454681396484,
"learning_rate": 2.6461988304093572e-05,
"loss": 0.3863,
"step": 1990
},
{
"epoch": 26.006315789473685,
"grad_norm": 38.29682159423828,
"learning_rate": 2.6315789473684212e-05,
"loss": 0.3005,
"step": 2000
},
{
"epoch": 26.00894736842105,
"grad_norm": 0.6198650002479553,
"learning_rate": 2.6169590643274856e-05,
"loss": 0.2257,
"step": 2010
},
{
"epoch": 26.01157894736842,
"grad_norm": 17.415178298950195,
"learning_rate": 2.60233918128655e-05,
"loss": 0.4258,
"step": 2020
},
{
"epoch": 26.01421052631579,
"grad_norm": 3.995495319366455,
"learning_rate": 2.5877192982456143e-05,
"loss": 0.2217,
"step": 2030
},
{
"epoch": 26.01684210526316,
"grad_norm": 19.10511589050293,
"learning_rate": 2.5730994152046783e-05,
"loss": 0.2082,
"step": 2040
},
{
"epoch": 26.019473684210528,
"grad_norm": 22.767173767089844,
"learning_rate": 2.5584795321637427e-05,
"loss": 0.2084,
"step": 2050
},
{
"epoch": 26.02,
"eval_accuracy": 0.6850393700787402,
"eval_loss": 1.1360818147659302,
"eval_runtime": 212.9674,
"eval_samples_per_second": 0.596,
"eval_steps_per_second": 0.075,
"step": 2052
},
{
"epoch": 27.002105263157894,
"grad_norm": 0.7093040943145752,
"learning_rate": 2.5438596491228074e-05,
"loss": 0.3909,
"step": 2060
},
{
"epoch": 27.004736842105263,
"grad_norm": 4.107806205749512,
"learning_rate": 2.5292397660818717e-05,
"loss": 0.1481,
"step": 2070
},
{
"epoch": 27.007368421052632,
"grad_norm": 8.628134727478027,
"learning_rate": 2.5146198830409358e-05,
"loss": 0.27,
"step": 2080
},
{
"epoch": 27.01,
"grad_norm": 5.203458786010742,
"learning_rate": 2.5e-05,
"loss": 0.2593,
"step": 2090
},
{
"epoch": 27.012631578947367,
"grad_norm": 12.856894493103027,
"learning_rate": 2.485380116959064e-05,
"loss": 0.1478,
"step": 2100
},
{
"epoch": 27.015263157894736,
"grad_norm": 0.4042441248893738,
"learning_rate": 2.470760233918129e-05,
"loss": 0.3425,
"step": 2110
},
{
"epoch": 27.017894736842106,
"grad_norm": 22.888172149658203,
"learning_rate": 2.456140350877193e-05,
"loss": 0.1724,
"step": 2120
},
{
"epoch": 27.02,
"eval_accuracy": 0.7401574803149606,
"eval_loss": 0.8791013956069946,
"eval_runtime": 226.6646,
"eval_samples_per_second": 0.56,
"eval_steps_per_second": 0.071,
"step": 2128
},
{
"epoch": 28.000526315789475,
"grad_norm": 8.835603713989258,
"learning_rate": 2.4415204678362576e-05,
"loss": 0.2062,
"step": 2130
},
{
"epoch": 28.00315789473684,
"grad_norm": 30.87272071838379,
"learning_rate": 2.4269005847953216e-05,
"loss": 0.2398,
"step": 2140
},
{
"epoch": 28.00578947368421,
"grad_norm": 3.532879590988159,
"learning_rate": 2.412280701754386e-05,
"loss": 0.152,
"step": 2150
},
{
"epoch": 28.00842105263158,
"grad_norm": 7.7977752685546875,
"learning_rate": 2.3976608187134503e-05,
"loss": 0.2509,
"step": 2160
},
{
"epoch": 28.01105263157895,
"grad_norm": 22.783979415893555,
"learning_rate": 2.3830409356725147e-05,
"loss": 0.2393,
"step": 2170
},
{
"epoch": 28.013684210526314,
"grad_norm": 16.93852424621582,
"learning_rate": 2.368421052631579e-05,
"loss": 0.2569,
"step": 2180
},
{
"epoch": 28.016315789473683,
"grad_norm": 15.875041961669922,
"learning_rate": 2.3538011695906434e-05,
"loss": 0.2386,
"step": 2190
},
{
"epoch": 28.018947368421053,
"grad_norm": 0.38901668787002563,
"learning_rate": 2.3391812865497074e-05,
"loss": 0.1715,
"step": 2200
},
{
"epoch": 28.02,
"eval_accuracy": 0.7559055118110236,
"eval_loss": 0.7549403309822083,
"eval_runtime": 220.4782,
"eval_samples_per_second": 0.576,
"eval_steps_per_second": 0.073,
"step": 2204
},
{
"epoch": 29.001578947368422,
"grad_norm": 14.774117469787598,
"learning_rate": 2.324561403508772e-05,
"loss": 0.2363,
"step": 2210
},
{
"epoch": 29.004210526315788,
"grad_norm": 25.879791259765625,
"learning_rate": 2.309941520467836e-05,
"loss": 0.1621,
"step": 2220
},
{
"epoch": 29.006842105263157,
"grad_norm": 29.524221420288086,
"learning_rate": 2.295321637426901e-05,
"loss": 0.2586,
"step": 2230
},
{
"epoch": 29.009473684210526,
"grad_norm": 0.5666794776916504,
"learning_rate": 2.280701754385965e-05,
"loss": 0.1223,
"step": 2240
},
{
"epoch": 29.012105263157896,
"grad_norm": 1.050584077835083,
"learning_rate": 2.2660818713450292e-05,
"loss": 0.1105,
"step": 2250
},
{
"epoch": 29.014736842105265,
"grad_norm": 23.264785766601562,
"learning_rate": 2.2514619883040936e-05,
"loss": 0.1934,
"step": 2260
},
{
"epoch": 29.01736842105263,
"grad_norm": 21.131118774414062,
"learning_rate": 2.236842105263158e-05,
"loss": 0.3142,
"step": 2270
},
{
"epoch": 29.02,
"grad_norm": 6.656404495239258,
"learning_rate": 2.2222222222222223e-05,
"loss": 0.2719,
"step": 2280
},
{
"epoch": 29.02,
"eval_accuracy": 0.7716535433070866,
"eval_loss": 0.770797073841095,
"eval_runtime": 215.6644,
"eval_samples_per_second": 0.589,
"eval_steps_per_second": 0.074,
"step": 2280
},
{
"epoch": 30.00263157894737,
"grad_norm": 5.025905132293701,
"learning_rate": 2.2076023391812867e-05,
"loss": 0.0985,
"step": 2290
},
{
"epoch": 30.00526315789474,
"grad_norm": 3.686487913131714,
"learning_rate": 2.1929824561403507e-05,
"loss": 0.1349,
"step": 2300
},
{
"epoch": 30.007894736842104,
"grad_norm": 3.4678690433502197,
"learning_rate": 2.1783625730994154e-05,
"loss": 0.0814,
"step": 2310
},
{
"epoch": 30.010526315789473,
"grad_norm": 3.107570171356201,
"learning_rate": 2.1637426900584794e-05,
"loss": 0.1936,
"step": 2320
},
{
"epoch": 30.013157894736842,
"grad_norm": 2.725902795791626,
"learning_rate": 2.149122807017544e-05,
"loss": 0.1985,
"step": 2330
},
{
"epoch": 30.01578947368421,
"grad_norm": 3.2765145301818848,
"learning_rate": 2.134502923976608e-05,
"loss": 0.1352,
"step": 2340
},
{
"epoch": 30.018421052631577,
"grad_norm": 16.57495880126953,
"learning_rate": 2.1198830409356725e-05,
"loss": 0.2021,
"step": 2350
},
{
"epoch": 30.02,
"eval_accuracy": 0.7165354330708661,
"eval_loss": 1.139369010925293,
"eval_runtime": 219.7556,
"eval_samples_per_second": 0.578,
"eval_steps_per_second": 0.073,
"step": 2356
},
{
"epoch": 31.001052631578947,
"grad_norm": 0.4346730709075928,
"learning_rate": 2.105263157894737e-05,
"loss": 0.1554,
"step": 2360
},
{
"epoch": 31.003684210526316,
"grad_norm": 15.532111167907715,
"learning_rate": 2.0906432748538013e-05,
"loss": 0.1262,
"step": 2370
},
{
"epoch": 31.006315789473685,
"grad_norm": 3.0062544345855713,
"learning_rate": 2.0760233918128656e-05,
"loss": 0.1561,
"step": 2380
},
{
"epoch": 31.00894736842105,
"grad_norm": 15.58200740814209,
"learning_rate": 2.06140350877193e-05,
"loss": 0.1591,
"step": 2390
},
{
"epoch": 31.01157894736842,
"grad_norm": 9.851778984069824,
"learning_rate": 2.046783625730994e-05,
"loss": 0.1295,
"step": 2400
},
{
"epoch": 31.01421052631579,
"grad_norm": 1.939591646194458,
"learning_rate": 2.0321637426900587e-05,
"loss": 0.1785,
"step": 2410
},
{
"epoch": 31.01684210526316,
"grad_norm": 13.707688331604004,
"learning_rate": 2.0175438596491227e-05,
"loss": 0.1499,
"step": 2420
},
{
"epoch": 31.019473684210528,
"grad_norm": 1.3323267698287964,
"learning_rate": 2.0029239766081874e-05,
"loss": 0.0999,
"step": 2430
},
{
"epoch": 31.02,
"eval_accuracy": 0.7716535433070866,
"eval_loss": 0.7838273644447327,
"eval_runtime": 221.1151,
"eval_samples_per_second": 0.574,
"eval_steps_per_second": 0.072,
"step": 2432
},
{
"epoch": 32.002105263157894,
"grad_norm": 1.2039517164230347,
"learning_rate": 1.9883040935672515e-05,
"loss": 0.1384,
"step": 2440
},
{
"epoch": 32.00473684210527,
"grad_norm": 4.4641499519348145,
"learning_rate": 1.9736842105263158e-05,
"loss": 0.2209,
"step": 2450
},
{
"epoch": 32.00736842105263,
"grad_norm": 5.985694885253906,
"learning_rate": 1.9590643274853802e-05,
"loss": 0.1737,
"step": 2460
},
{
"epoch": 32.01,
"grad_norm": 9.976003646850586,
"learning_rate": 1.9444444444444445e-05,
"loss": 0.1998,
"step": 2470
},
{
"epoch": 32.01263157894737,
"grad_norm": 9.69500732421875,
"learning_rate": 1.929824561403509e-05,
"loss": 0.2171,
"step": 2480
},
{
"epoch": 32.015263157894736,
"grad_norm": 21.551578521728516,
"learning_rate": 1.9152046783625733e-05,
"loss": 0.1829,
"step": 2490
},
{
"epoch": 32.0178947368421,
"grad_norm": 9.649386405944824,
"learning_rate": 1.9005847953216373e-05,
"loss": 0.1473,
"step": 2500
},
{
"epoch": 32.02,
"eval_accuracy": 0.6456692913385826,
"eval_loss": 1.3808568716049194,
"eval_runtime": 212.0868,
"eval_samples_per_second": 0.599,
"eval_steps_per_second": 0.075,
"step": 2508
},
{
"epoch": 33.00052631578947,
"grad_norm": 0.5508492588996887,
"learning_rate": 1.885964912280702e-05,
"loss": 0.1658,
"step": 2510
},
{
"epoch": 33.003157894736844,
"grad_norm": 1.3304760456085205,
"learning_rate": 1.871345029239766e-05,
"loss": 0.1968,
"step": 2520
},
{
"epoch": 33.00578947368421,
"grad_norm": 35.01847457885742,
"learning_rate": 1.8567251461988304e-05,
"loss": 0.1615,
"step": 2530
},
{
"epoch": 33.008421052631576,
"grad_norm": 5.112392902374268,
"learning_rate": 1.8421052631578947e-05,
"loss": 0.1648,
"step": 2540
},
{
"epoch": 33.01105263157895,
"grad_norm": 2.015471935272217,
"learning_rate": 1.827485380116959e-05,
"loss": 0.2019,
"step": 2550
},
{
"epoch": 33.013684210526314,
"grad_norm": 0.9456290602684021,
"learning_rate": 1.8128654970760235e-05,
"loss": 0.1382,
"step": 2560
},
{
"epoch": 33.01631578947369,
"grad_norm": 5.0300164222717285,
"learning_rate": 1.7982456140350878e-05,
"loss": 0.1313,
"step": 2570
},
{
"epoch": 33.01894736842105,
"grad_norm": 0.5818091034889221,
"learning_rate": 1.7836257309941522e-05,
"loss": 0.0939,
"step": 2580
},
{
"epoch": 33.02,
"eval_accuracy": 0.7874015748031497,
"eval_loss": 0.783892035484314,
"eval_runtime": 217.5033,
"eval_samples_per_second": 0.584,
"eval_steps_per_second": 0.074,
"step": 2584
},
{
"epoch": 34.00157894736842,
"grad_norm": 1.293542504310608,
"learning_rate": 1.7690058479532165e-05,
"loss": 0.0838,
"step": 2590
},
{
"epoch": 34.00421052631579,
"grad_norm": 15.003107070922852,
"learning_rate": 1.7543859649122806e-05,
"loss": 0.2212,
"step": 2600
},
{
"epoch": 34.00684210526316,
"grad_norm": 1.133008599281311,
"learning_rate": 1.7397660818713453e-05,
"loss": 0.1399,
"step": 2610
},
{
"epoch": 34.009473684210526,
"grad_norm": 14.774809837341309,
"learning_rate": 1.7251461988304093e-05,
"loss": 0.1712,
"step": 2620
},
{
"epoch": 34.01210526315789,
"grad_norm": 0.46233054995536804,
"learning_rate": 1.7105263157894737e-05,
"loss": 0.0984,
"step": 2630
},
{
"epoch": 34.014736842105265,
"grad_norm": 1.898353934288025,
"learning_rate": 1.695906432748538e-05,
"loss": 0.0641,
"step": 2640
},
{
"epoch": 34.01736842105263,
"grad_norm": 0.8380947709083557,
"learning_rate": 1.6812865497076024e-05,
"loss": 0.2255,
"step": 2650
},
{
"epoch": 34.02,
"grad_norm": 16.234495162963867,
"learning_rate": 1.6666666666666667e-05,
"loss": 0.0952,
"step": 2660
},
{
"epoch": 34.02,
"eval_accuracy": 0.7007874015748031,
"eval_loss": 1.0635521411895752,
"eval_runtime": 209.1412,
"eval_samples_per_second": 0.607,
"eval_steps_per_second": 0.077,
"step": 2660
},
{
"epoch": 35.002631578947366,
"grad_norm": 0.17983902990818024,
"learning_rate": 1.652046783625731e-05,
"loss": 0.1539,
"step": 2670
},
{
"epoch": 35.00526315789474,
"grad_norm": 8.456697463989258,
"learning_rate": 1.6374269005847955e-05,
"loss": 0.0903,
"step": 2680
},
{
"epoch": 35.007894736842104,
"grad_norm": 7.233275413513184,
"learning_rate": 1.62280701754386e-05,
"loss": 0.206,
"step": 2690
},
{
"epoch": 35.01052631578948,
"grad_norm": 0.25833821296691895,
"learning_rate": 1.608187134502924e-05,
"loss": 0.1998,
"step": 2700
},
{
"epoch": 35.01315789473684,
"grad_norm": 0.7079264521598816,
"learning_rate": 1.5935672514619886e-05,
"loss": 0.1462,
"step": 2710
},
{
"epoch": 35.01578947368421,
"grad_norm": 52.23944091796875,
"learning_rate": 1.5789473684210526e-05,
"loss": 0.2127,
"step": 2720
},
{
"epoch": 35.01842105263158,
"grad_norm": 17.99128532409668,
"learning_rate": 1.564327485380117e-05,
"loss": 0.2684,
"step": 2730
},
{
"epoch": 35.02,
"eval_accuracy": 0.7322834645669292,
"eval_loss": 0.9193896651268005,
"eval_runtime": 222.8863,
"eval_samples_per_second": 0.57,
"eval_steps_per_second": 0.072,
"step": 2736
},
{
"epoch": 36.00105263157895,
"grad_norm": 5.967014789581299,
"learning_rate": 1.5497076023391813e-05,
"loss": 0.1196,
"step": 2740
},
{
"epoch": 36.003684210526316,
"grad_norm": 35.71928024291992,
"learning_rate": 1.5350877192982457e-05,
"loss": 0.1805,
"step": 2750
},
{
"epoch": 36.00631578947368,
"grad_norm": 0.19552262127399445,
"learning_rate": 1.5204678362573099e-05,
"loss": 0.069,
"step": 2760
},
{
"epoch": 36.008947368421055,
"grad_norm": 0.09623876214027405,
"learning_rate": 1.5058479532163744e-05,
"loss": 0.1688,
"step": 2770
},
{
"epoch": 36.01157894736842,
"grad_norm": 4.143298625946045,
"learning_rate": 1.4912280701754386e-05,
"loss": 0.1548,
"step": 2780
},
{
"epoch": 36.01421052631579,
"grad_norm": 0.08483076095581055,
"learning_rate": 1.4766081871345031e-05,
"loss": 0.1321,
"step": 2790
},
{
"epoch": 36.01684210526316,
"grad_norm": 27.361652374267578,
"learning_rate": 1.4619883040935673e-05,
"loss": 0.1274,
"step": 2800
},
{
"epoch": 36.019473684210524,
"grad_norm": 0.30415764451026917,
"learning_rate": 1.4473684210526317e-05,
"loss": 0.1628,
"step": 2810
},
{
"epoch": 36.02,
"eval_accuracy": 0.8031496062992126,
"eval_loss": 0.7345768809318542,
"eval_runtime": 218.0492,
"eval_samples_per_second": 0.582,
"eval_steps_per_second": 0.073,
"step": 2812
},
{
"epoch": 37.002105263157894,
"grad_norm": 1.5019361972808838,
"learning_rate": 1.4327485380116959e-05,
"loss": 0.111,
"step": 2820
},
{
"epoch": 37.00473684210527,
"grad_norm": 20.669527053833008,
"learning_rate": 1.4181286549707604e-05,
"loss": 0.0966,
"step": 2830
},
{
"epoch": 37.00736842105263,
"grad_norm": 8.103653907775879,
"learning_rate": 1.4035087719298246e-05,
"loss": 0.1641,
"step": 2840
},
{
"epoch": 37.01,
"grad_norm": 19.656436920166016,
"learning_rate": 1.388888888888889e-05,
"loss": 0.0822,
"step": 2850
},
{
"epoch": 37.01263157894737,
"grad_norm": 4.070357799530029,
"learning_rate": 1.3742690058479531e-05,
"loss": 0.1132,
"step": 2860
},
{
"epoch": 37.015263157894736,
"grad_norm": 2.3238236904144287,
"learning_rate": 1.3596491228070177e-05,
"loss": 0.1275,
"step": 2870
},
{
"epoch": 37.0178947368421,
"grad_norm": 0.8202667236328125,
"learning_rate": 1.3450292397660819e-05,
"loss": 0.0584,
"step": 2880
},
{
"epoch": 37.02,
"eval_accuracy": 0.7322834645669292,
"eval_loss": 1.0111991167068481,
"eval_runtime": 215.1627,
"eval_samples_per_second": 0.59,
"eval_steps_per_second": 0.074,
"step": 2888
},
{
"epoch": 38.00052631578947,
"grad_norm": 8.352115631103516,
"learning_rate": 1.3304093567251464e-05,
"loss": 0.1348,
"step": 2890
},
{
"epoch": 38.003157894736844,
"grad_norm": 3.482306480407715,
"learning_rate": 1.3157894736842106e-05,
"loss": 0.1314,
"step": 2900
},
{
"epoch": 38.00578947368421,
"grad_norm": 3.1589252948760986,
"learning_rate": 1.301169590643275e-05,
"loss": 0.0665,
"step": 2910
},
{
"epoch": 38.008421052631576,
"grad_norm": 2.0633227825164795,
"learning_rate": 1.2865497076023392e-05,
"loss": 0.0588,
"step": 2920
},
{
"epoch": 38.01105263157895,
"grad_norm": 15.660873413085938,
"learning_rate": 1.2719298245614037e-05,
"loss": 0.0872,
"step": 2930
},
{
"epoch": 38.013684210526314,
"grad_norm": 0.21239978075027466,
"learning_rate": 1.2573099415204679e-05,
"loss": 0.0914,
"step": 2940
},
{
"epoch": 38.01631578947369,
"grad_norm": 3.8502485752105713,
"learning_rate": 1.242690058479532e-05,
"loss": 0.0803,
"step": 2950
},
{
"epoch": 38.01894736842105,
"grad_norm": 0.7816948890686035,
"learning_rate": 1.2280701754385964e-05,
"loss": 0.0567,
"step": 2960
},
{
"epoch": 38.02,
"eval_accuracy": 0.7322834645669292,
"eval_loss": 1.058438777923584,
"eval_runtime": 210.151,
"eval_samples_per_second": 0.604,
"eval_steps_per_second": 0.076,
"step": 2964
},
{
"epoch": 39.00157894736842,
"grad_norm": 1.5109614133834839,
"learning_rate": 1.2134502923976608e-05,
"loss": 0.1519,
"step": 2970
},
{
"epoch": 39.00421052631579,
"grad_norm": 23.389062881469727,
"learning_rate": 1.1988304093567252e-05,
"loss": 0.1978,
"step": 2980
},
{
"epoch": 39.00684210526316,
"grad_norm": 2.617483139038086,
"learning_rate": 1.1842105263157895e-05,
"loss": 0.3215,
"step": 2990
},
{
"epoch": 39.009473684210526,
"grad_norm": 2.0149471759796143,
"learning_rate": 1.1695906432748537e-05,
"loss": 0.0918,
"step": 3000
},
{
"epoch": 39.01210526315789,
"grad_norm": 0.3655383288860321,
"learning_rate": 1.154970760233918e-05,
"loss": 0.0587,
"step": 3010
},
{
"epoch": 39.014736842105265,
"grad_norm": 23.13666343688965,
"learning_rate": 1.1403508771929824e-05,
"loss": 0.1863,
"step": 3020
},
{
"epoch": 39.01736842105263,
"grad_norm": 0.5490059852600098,
"learning_rate": 1.1257309941520468e-05,
"loss": 0.0516,
"step": 3030
},
{
"epoch": 39.02,
"grad_norm": 6.660675048828125,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.1358,
"step": 3040
},
{
"epoch": 39.02,
"eval_accuracy": 0.7322834645669292,
"eval_loss": 1.05661940574646,
"eval_runtime": 208.362,
"eval_samples_per_second": 0.61,
"eval_steps_per_second": 0.077,
"step": 3040
},
{
"epoch": 40.002631578947366,
"grad_norm": 0.15694499015808105,
"learning_rate": 1.0964912280701754e-05,
"loss": 0.2102,
"step": 3050
},
{
"epoch": 40.00526315789474,
"grad_norm": 21.509859085083008,
"learning_rate": 1.0818713450292397e-05,
"loss": 0.107,
"step": 3060
},
{
"epoch": 40.007894736842104,
"grad_norm": 13.848420143127441,
"learning_rate": 1.067251461988304e-05,
"loss": 0.118,
"step": 3070
},
{
"epoch": 40.01052631578948,
"grad_norm": 0.11400782316923141,
"learning_rate": 1.0526315789473684e-05,
"loss": 0.1119,
"step": 3080
},
{
"epoch": 40.01315789473684,
"grad_norm": 27.989046096801758,
"learning_rate": 1.0380116959064328e-05,
"loss": 0.2483,
"step": 3090
},
{
"epoch": 40.01578947368421,
"grad_norm": 3.2790133953094482,
"learning_rate": 1.023391812865497e-05,
"loss": 0.0518,
"step": 3100
},
{
"epoch": 40.01842105263158,
"grad_norm": 1.107125163078308,
"learning_rate": 1.0087719298245614e-05,
"loss": 0.0796,
"step": 3110
},
{
"epoch": 40.02,
"eval_accuracy": 0.7480314960629921,
"eval_loss": 0.9323013424873352,
"eval_runtime": 206.0517,
"eval_samples_per_second": 0.616,
"eval_steps_per_second": 0.078,
"step": 3116
},
{
"epoch": 41.00105263157895,
"grad_norm": 0.9045488238334656,
"learning_rate": 9.941520467836257e-06,
"loss": 0.1818,
"step": 3120
},
{
"epoch": 41.003684210526316,
"grad_norm": 6.538638114929199,
"learning_rate": 9.795321637426901e-06,
"loss": 0.1703,
"step": 3130
},
{
"epoch": 41.00631578947368,
"grad_norm": 4.6542277336120605,
"learning_rate": 9.649122807017545e-06,
"loss": 0.1232,
"step": 3140
},
{
"epoch": 41.008947368421055,
"grad_norm": 0.4844169318675995,
"learning_rate": 9.502923976608186e-06,
"loss": 0.1386,
"step": 3150
},
{
"epoch": 41.01157894736842,
"grad_norm": 0.5313725471496582,
"learning_rate": 9.35672514619883e-06,
"loss": 0.0692,
"step": 3160
},
{
"epoch": 41.01421052631579,
"grad_norm": 0.22919414937496185,
"learning_rate": 9.210526315789474e-06,
"loss": 0.0188,
"step": 3170
},
{
"epoch": 41.01684210526316,
"grad_norm": 2.425123929977417,
"learning_rate": 9.064327485380117e-06,
"loss": 0.0582,
"step": 3180
},
{
"epoch": 41.019473684210524,
"grad_norm": 0.15257947146892548,
"learning_rate": 8.918128654970761e-06,
"loss": 0.0828,
"step": 3190
},
{
"epoch": 41.02,
"eval_accuracy": 0.7952755905511811,
"eval_loss": 0.7611303925514221,
"eval_runtime": 218.1076,
"eval_samples_per_second": 0.582,
"eval_steps_per_second": 0.073,
"step": 3192
},
{
"epoch": 42.002105263157894,
"grad_norm": 0.6726482510566711,
"learning_rate": 8.771929824561403e-06,
"loss": 0.1402,
"step": 3200
},
{
"epoch": 42.00473684210527,
"grad_norm": 15.30670166015625,
"learning_rate": 8.625730994152046e-06,
"loss": 0.0809,
"step": 3210
},
{
"epoch": 42.00736842105263,
"grad_norm": 3.397010326385498,
"learning_rate": 8.47953216374269e-06,
"loss": 0.1602,
"step": 3220
},
{
"epoch": 42.01,
"grad_norm": 19.94878387451172,
"learning_rate": 8.333333333333334e-06,
"loss": 0.086,
"step": 3230
},
{
"epoch": 42.01263157894737,
"grad_norm": 8.212011337280273,
"learning_rate": 8.187134502923977e-06,
"loss": 0.143,
"step": 3240
},
{
"epoch": 42.015263157894736,
"grad_norm": 4.16144323348999,
"learning_rate": 8.04093567251462e-06,
"loss": 0.0462,
"step": 3250
},
{
"epoch": 42.0178947368421,
"grad_norm": 1.0098721981048584,
"learning_rate": 7.894736842105263e-06,
"loss": 0.0661,
"step": 3260
},
{
"epoch": 42.02,
"eval_accuracy": 0.7874015748031497,
"eval_loss": 0.7284496426582336,
"eval_runtime": 213.0991,
"eval_samples_per_second": 0.596,
"eval_steps_per_second": 0.075,
"step": 3268
},
{
"epoch": 43.00052631578947,
"grad_norm": 3.563675880432129,
"learning_rate": 7.748538011695907e-06,
"loss": 0.0854,
"step": 3270
},
{
"epoch": 43.003157894736844,
"grad_norm": 23.9377384185791,
"learning_rate": 7.602339181286549e-06,
"loss": 0.1174,
"step": 3280
},
{
"epoch": 43.00578947368421,
"grad_norm": 5.841782093048096,
"learning_rate": 7.456140350877193e-06,
"loss": 0.0652,
"step": 3290
},
{
"epoch": 43.008421052631576,
"grad_norm": 3.5263381004333496,
"learning_rate": 7.3099415204678366e-06,
"loss": 0.107,
"step": 3300
},
{
"epoch": 43.01105263157895,
"grad_norm": 0.21935203671455383,
"learning_rate": 7.163742690058479e-06,
"loss": 0.1081,
"step": 3310
},
{
"epoch": 43.013684210526314,
"grad_norm": 0.27123141288757324,
"learning_rate": 7.017543859649123e-06,
"loss": 0.1304,
"step": 3320
},
{
"epoch": 43.01631578947369,
"grad_norm": 0.10241150110960007,
"learning_rate": 6.871345029239766e-06,
"loss": 0.0419,
"step": 3330
},
{
"epoch": 43.01894736842105,
"grad_norm": 0.14180737733840942,
"learning_rate": 6.725146198830409e-06,
"loss": 0.0882,
"step": 3340
},
{
"epoch": 43.02,
"eval_accuracy": 0.7952755905511811,
"eval_loss": 0.6982414722442627,
"eval_runtime": 211.2161,
"eval_samples_per_second": 0.601,
"eval_steps_per_second": 0.076,
"step": 3344
},
{
"epoch": 44.00157894736842,
"grad_norm": 3.263852834701538,
"learning_rate": 6.578947368421053e-06,
"loss": 0.0781,
"step": 3350
},
{
"epoch": 44.00421052631579,
"grad_norm": 2.984760284423828,
"learning_rate": 6.432748538011696e-06,
"loss": 0.0315,
"step": 3360
},
{
"epoch": 44.00684210526316,
"grad_norm": 0.09511619806289673,
"learning_rate": 6.286549707602339e-06,
"loss": 0.0384,
"step": 3370
},
{
"epoch": 44.009473684210526,
"grad_norm": 2.247210741043091,
"learning_rate": 6.140350877192982e-06,
"loss": 0.0445,
"step": 3380
},
{
"epoch": 44.01210526315789,
"grad_norm": 31.92279052734375,
"learning_rate": 5.994152046783626e-06,
"loss": 0.1253,
"step": 3390
},
{
"epoch": 44.014736842105265,
"grad_norm": 2.3949315547943115,
"learning_rate": 5.8479532163742686e-06,
"loss": 0.077,
"step": 3400
},
{
"epoch": 44.01736842105263,
"grad_norm": 0.148012176156044,
"learning_rate": 5.701754385964912e-06,
"loss": 0.0954,
"step": 3410
},
{
"epoch": 44.02,
"grad_norm": 0.5894277691841125,
"learning_rate": 5.555555555555556e-06,
"loss": 0.0398,
"step": 3420
},
{
"epoch": 44.02,
"eval_accuracy": 0.7716535433070866,
"eval_loss": 0.8585867285728455,
"eval_runtime": 217.9943,
"eval_samples_per_second": 0.583,
"eval_steps_per_second": 0.073,
"step": 3420
},
{
"epoch": 45.002631578947366,
"grad_norm": 1.641733169555664,
"learning_rate": 5.409356725146199e-06,
"loss": 0.0984,
"step": 3430
},
{
"epoch": 45.00526315789474,
"grad_norm": 1.107046127319336,
"learning_rate": 5.263157894736842e-06,
"loss": 0.0461,
"step": 3440
},
{
"epoch": 45.007894736842104,
"grad_norm": 4.157370567321777,
"learning_rate": 5.116959064327485e-06,
"loss": 0.0995,
"step": 3450
},
{
"epoch": 45.01052631578948,
"grad_norm": 2.8948891162872314,
"learning_rate": 4.970760233918129e-06,
"loss": 0.0663,
"step": 3460
},
{
"epoch": 45.01315789473684,
"grad_norm": 1.7566790580749512,
"learning_rate": 4.824561403508772e-06,
"loss": 0.051,
"step": 3470
},
{
"epoch": 45.01578947368421,
"grad_norm": 20.800312042236328,
"learning_rate": 4.678362573099415e-06,
"loss": 0.0804,
"step": 3480
},
{
"epoch": 45.01842105263158,
"grad_norm": 0.07893835008144379,
"learning_rate": 4.532163742690059e-06,
"loss": 0.2085,
"step": 3490
},
{
"epoch": 45.02,
"eval_accuracy": 0.7716535433070866,
"eval_loss": 0.7989789247512817,
"eval_runtime": 208.7877,
"eval_samples_per_second": 0.608,
"eval_steps_per_second": 0.077,
"step": 3496
},
{
"epoch": 46.00105263157895,
"grad_norm": 22.98341941833496,
"learning_rate": 4.3859649122807014e-06,
"loss": 0.0652,
"step": 3500
},
{
"epoch": 46.003684210526316,
"grad_norm": 2.6828200817108154,
"learning_rate": 4.239766081871345e-06,
"loss": 0.0426,
"step": 3510
},
{
"epoch": 46.00631578947368,
"grad_norm": 7.188971996307373,
"learning_rate": 4.093567251461989e-06,
"loss": 0.0808,
"step": 3520
},
{
"epoch": 46.008947368421055,
"grad_norm": 0.5306025147438049,
"learning_rate": 3.9473684210526315e-06,
"loss": 0.0881,
"step": 3530
},
{
"epoch": 46.01157894736842,
"grad_norm": 1.7458972930908203,
"learning_rate": 3.8011695906432747e-06,
"loss": 0.0808,
"step": 3540
},
{
"epoch": 46.01421052631579,
"grad_norm": 0.7338550686836243,
"learning_rate": 3.6549707602339183e-06,
"loss": 0.0545,
"step": 3550
},
{
"epoch": 46.01684210526316,
"grad_norm": 1.4630274772644043,
"learning_rate": 3.5087719298245615e-06,
"loss": 0.119,
"step": 3560
},
{
"epoch": 46.019473684210524,
"grad_norm": 0.12060370296239853,
"learning_rate": 3.3625730994152047e-06,
"loss": 0.0509,
"step": 3570
},
{
"epoch": 46.02,
"eval_accuracy": 0.8267716535433071,
"eval_loss": 0.7133814096450806,
"eval_runtime": 215.9384,
"eval_samples_per_second": 0.588,
"eval_steps_per_second": 0.074,
"step": 3572
},
{
"epoch": 47.002105263157894,
"grad_norm": 4.411724090576172,
"learning_rate": 3.216374269005848e-06,
"loss": 0.1346,
"step": 3580
},
{
"epoch": 47.00473684210527,
"grad_norm": 0.7380800843238831,
"learning_rate": 3.070175438596491e-06,
"loss": 0.0715,
"step": 3590
},
{
"epoch": 47.00736842105263,
"grad_norm": 3.584591865539551,
"learning_rate": 2.9239766081871343e-06,
"loss": 0.1098,
"step": 3600
},
{
"epoch": 47.01,
"grad_norm": 20.62811851501465,
"learning_rate": 2.777777777777778e-06,
"loss": 0.0818,
"step": 3610
},
{
"epoch": 47.01263157894737,
"grad_norm": 12.115966796875,
"learning_rate": 2.631578947368421e-06,
"loss": 0.0649,
"step": 3620
},
{
"epoch": 47.015263157894736,
"grad_norm": 0.42892947793006897,
"learning_rate": 2.4853801169590643e-06,
"loss": 0.0611,
"step": 3630
},
{
"epoch": 47.0178947368421,
"grad_norm": 29.426136016845703,
"learning_rate": 2.3391812865497075e-06,
"loss": 0.0791,
"step": 3640
},
{
"epoch": 47.02,
"eval_accuracy": 0.8188976377952756,
"eval_loss": 0.6887122988700867,
"eval_runtime": 227.1782,
"eval_samples_per_second": 0.559,
"eval_steps_per_second": 0.07,
"step": 3648
},
{
"epoch": 48.00052631578947,
"grad_norm": 0.11056993901729584,
"learning_rate": 2.1929824561403507e-06,
"loss": 0.1203,
"step": 3650
},
{
"epoch": 48.003157894736844,
"grad_norm": 3.5159361362457275,
"learning_rate": 2.0467836257309943e-06,
"loss": 0.0703,
"step": 3660
},
{
"epoch": 48.00578947368421,
"grad_norm": 0.193506121635437,
"learning_rate": 1.9005847953216373e-06,
"loss": 0.1127,
"step": 3670
},
{
"epoch": 48.008421052631576,
"grad_norm": 0.2963356673717499,
"learning_rate": 1.7543859649122807e-06,
"loss": 0.0622,
"step": 3680
},
{
"epoch": 48.01105263157895,
"grad_norm": 0.357128381729126,
"learning_rate": 1.608187134502924e-06,
"loss": 0.0157,
"step": 3690
},
{
"epoch": 48.013684210526314,
"grad_norm": 5.457529067993164,
"learning_rate": 1.4619883040935671e-06,
"loss": 0.0483,
"step": 3700
},
{
"epoch": 48.01631578947369,
"grad_norm": 32.87456512451172,
"learning_rate": 1.3157894736842106e-06,
"loss": 0.0877,
"step": 3710
},
{
"epoch": 48.01894736842105,
"grad_norm": 12.573355674743652,
"learning_rate": 1.1695906432748538e-06,
"loss": 0.0469,
"step": 3720
},
{
"epoch": 48.02,
"eval_accuracy": 0.8031496062992126,
"eval_loss": 0.7159085273742676,
"eval_runtime": 224.784,
"eval_samples_per_second": 0.565,
"eval_steps_per_second": 0.071,
"step": 3724
},
{
"epoch": 49.00157894736842,
"grad_norm": 0.6939800381660461,
"learning_rate": 1.0233918128654972e-06,
"loss": 0.1366,
"step": 3730
},
{
"epoch": 49.00421052631579,
"grad_norm": 1.9841969013214111,
"learning_rate": 8.771929824561404e-07,
"loss": 0.0566,
"step": 3740
},
{
"epoch": 49.00684210526316,
"grad_norm": 24.58515739440918,
"learning_rate": 7.309941520467836e-07,
"loss": 0.1208,
"step": 3750
},
{
"epoch": 49.009473684210526,
"grad_norm": 0.08509305864572525,
"learning_rate": 5.847953216374269e-07,
"loss": 0.071,
"step": 3760
},
{
"epoch": 49.01210526315789,
"grad_norm": 3.9984312057495117,
"learning_rate": 4.385964912280702e-07,
"loss": 0.058,
"step": 3770
},
{
"epoch": 49.014736842105265,
"grad_norm": 0.45436879992485046,
"learning_rate": 2.9239766081871344e-07,
"loss": 0.1048,
"step": 3780
},
{
"epoch": 49.01736842105263,
"grad_norm": 0.8313738703727722,
"learning_rate": 1.4619883040935672e-07,
"loss": 0.0478,
"step": 3790
},
{
"epoch": 49.02,
"grad_norm": 0.8988305926322937,
"learning_rate": 0.0,
"loss": 0.0621,
"step": 3800
},
{
"epoch": 49.02,
"eval_accuracy": 0.8031496062992126,
"eval_loss": 0.7061663866043091,
"eval_runtime": 216.7444,
"eval_samples_per_second": 0.586,
"eval_steps_per_second": 0.074,
"step": 3800
},
{
"epoch": 49.02,
"step": 3800,
"total_flos": 3.7892187304073626e+19,
"train_loss": 0.6421483513399174,
"train_runtime": 65613.5675,
"train_samples_per_second": 0.463,
"train_steps_per_second": 0.058
},
{
"epoch": 49.02,
"eval_accuracy": 0.6838709677419355,
"eval_loss": 1.2759393453598022,
"eval_runtime": 270.5851,
"eval_samples_per_second": 0.573,
"eval_steps_per_second": 0.074,
"step": 3800
},
{
"epoch": 49.02,
"eval_accuracy": 0.6838709677419355,
"eval_loss": 1.2759393453598022,
"eval_runtime": 271.1828,
"eval_samples_per_second": 0.572,
"eval_steps_per_second": 0.074,
"step": 3800
}
],
"logging_steps": 10,
"max_steps": 3800,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 500,
"total_flos": 3.7892187304073626e+19,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}