da-large / trainer_state.json
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 32.78688524590164,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"learning_rate": 2.4999999999999998e-05,
"loss": 4.5271,
"step": 10
},
{
"epoch": 0.33,
"learning_rate": 3.2525749891599525e-05,
"loss": 3.9027,
"step": 20
},
{
"epoch": 0.49,
"learning_rate": 3.6928031367991554e-05,
"loss": 3.4367,
"step": 30
},
{
"epoch": 0.66,
"learning_rate": 4.005149978319905e-05,
"loss": 3.3343,
"step": 40
},
{
"epoch": 0.82,
"learning_rate": 4.247425010840046e-05,
"loss": 3.2186,
"step": 50
},
{
"epoch": 0.98,
"learning_rate": 4.445378125959108e-05,
"loss": 3.1259,
"step": 60
},
{
"epoch": 1.15,
"learning_rate": 4.612745100035642e-05,
"loss": 2.9406,
"step": 70
},
{
"epoch": 1.31,
"learning_rate": 4.757724967479858e-05,
"loss": 2.8567,
"step": 80
},
{
"epoch": 1.48,
"learning_rate": 4.885606273598312e-05,
"loss": 2.8893,
"step": 90
},
{
"epoch": 1.64,
"learning_rate": 4.9999999999999996e-05,
"loss": 2.9067,
"step": 100
},
{
"epoch": 1.8,
"learning_rate": 5.1034817128955624e-05,
"loss": 2.813,
"step": 110
},
{
"epoch": 1.97,
"learning_rate": 5.197953115119061e-05,
"loss": 2.8364,
"step": 120
},
{
"epoch": 2.13,
"learning_rate": 5.2848583807670914e-05,
"loss": 2.7439,
"step": 130
},
{
"epoch": 2.3,
"learning_rate": 5.365320089195593e-05,
"loss": 2.7048,
"step": 140
},
{
"epoch": 2.46,
"learning_rate": 5.4402281476392025e-05,
"loss": 2.6631,
"step": 150
},
{
"epoch": 2.62,
"learning_rate": 5.5102999566398106e-05,
"loss": 2.6804,
"step": 160
},
{
"epoch": 2.79,
"learning_rate": 5.5761223034456847e-05,
"loss": 2.6094,
"step": 170
},
{
"epoch": 2.95,
"learning_rate": 5.6381812627582644e-05,
"loss": 2.6055,
"step": 180
},
{
"epoch": 3.11,
"learning_rate": 5.696884002382071e-05,
"loss": 2.4945,
"step": 190
},
{
"epoch": 3.28,
"learning_rate": 5.752574989159952e-05,
"loss": 2.5523,
"step": 200
},
{
"epoch": 3.44,
"learning_rate": 5.805548236834797e-05,
"loss": 2.5406,
"step": 210
},
{
"epoch": 3.61,
"learning_rate": 5.856056702055516e-05,
"loss": 2.5444,
"step": 220
},
{
"epoch": 3.77,
"learning_rate": 5.9043195900439815e-05,
"loss": 2.568,
"step": 230
},
{
"epoch": 3.93,
"learning_rate": 5.950528104279014e-05,
"loss": 2.5305,
"step": 240
},
{
"epoch": 4.1,
"learning_rate": 5.9948500216800926e-05,
"loss": 2.4653,
"step": 250
},
{
"epoch": 4.26,
"learning_rate": 6.037433369927045e-05,
"loss": 2.4644,
"step": 260
},
{
"epoch": 4.43,
"learning_rate": 6.078409410397467e-05,
"loss": 2.4302,
"step": 270
},
{
"epoch": 4.59,
"learning_rate": 6.117895078355547e-05,
"loss": 2.4025,
"step": 280
},
{
"epoch": 4.75,
"learning_rate": 6.15599499474739e-05,
"loss": 2.4185,
"step": 290
},
{
"epoch": 4.92,
"learning_rate": 6.192803136799156e-05,
"loss": 2.4651,
"step": 300
},
{
"epoch": 5.08,
"learning_rate": 6.22840423458568e-05,
"loss": 2.4095,
"step": 310
},
{
"epoch": 5.25,
"learning_rate": 6.262874945799764e-05,
"loss": 2.3613,
"step": 320
},
{
"epoch": 5.41,
"learning_rate": 6.296284849694718e-05,
"loss": 2.381,
"step": 330
},
{
"epoch": 5.57,
"learning_rate": 6.328697292605637e-05,
"loss": 2.3387,
"step": 340
},
{
"epoch": 5.74,
"learning_rate": 6.360170110875688e-05,
"loss": 2.3331,
"step": 350
},
{
"epoch": 5.9,
"learning_rate": 6.390756251918218e-05,
"loss": 2.3831,
"step": 360
},
{
"epoch": 6.07,
"learning_rate": 6.420504310167487e-05,
"loss": 2.2977,
"step": 370
},
{
"epoch": 6.23,
"learning_rate": 6.449458991542025e-05,
"loss": 2.2816,
"step": 380
},
{
"epoch": 6.39,
"learning_rate": 6.477661517566246e-05,
"loss": 2.2826,
"step": 390
},
{
"epoch": 6.56,
"learning_rate": 6.505149978319905e-05,
"loss": 2.3021,
"step": 400
},
{
"epoch": 6.72,
"learning_rate": 6.531959641799339e-05,
"loss": 2.3166,
"step": 410
},
{
"epoch": 6.89,
"learning_rate": 6.55812322599475e-05,
"loss": 2.3421,
"step": 420
},
{
"epoch": 7.05,
"learning_rate": 6.583671138948966e-05,
"loss": 2.3094,
"step": 430
},
{
"epoch": 7.21,
"learning_rate": 6.608631691215467e-05,
"loss": 2.2268,
"step": 440
},
{
"epoch": 7.38,
"learning_rate": 6.633031284438359e-05,
"loss": 2.2722,
"step": 450
},
{
"epoch": 7.54,
"learning_rate": 6.656894579203935e-05,
"loss": 2.1933,
"step": 460
},
{
"epoch": 7.7,
"learning_rate": 6.680244644839293e-05,
"loss": 2.1991,
"step": 470
},
{
"epoch": 7.87,
"learning_rate": 6.703103093438967e-05,
"loss": 2.2637,
"step": 480
},
{
"epoch": 8.03,
"learning_rate": 6.725490200071283e-05,
"loss": 2.2277,
"step": 490
},
{
"epoch": 8.2,
"learning_rate": 6.747425010840046e-05,
"loss": 2.1819,
"step": 500
},
{
"epoch": 8.2,
"eval_loss": 2.786494255065918,
"eval_runtime": 13.6759,
"eval_samples_per_second": 47.602,
"eval_steps_per_second": 0.585,
"step": 500
},
{
"epoch": 8.36,
"learning_rate": 6.76892544024484e-05,
"loss": 2.1427,
"step": 510
},
{
"epoch": 8.52,
"learning_rate": 6.790008359086997e-05,
"loss": 2.173,
"step": 520
},
{
"epoch": 8.69,
"learning_rate": 6.810689674001973e-05,
"loss": 2.1895,
"step": 530
},
{
"epoch": 8.85,
"learning_rate": 6.830984399557421e-05,
"loss": 2.2101,
"step": 540
},
{
"epoch": 9.02,
"learning_rate": 6.850906723735608e-05,
"loss": 2.1926,
"step": 550
},
{
"epoch": 9.18,
"learning_rate": 6.870470067515499e-05,
"loss": 2.0861,
"step": 560
},
{
"epoch": 9.34,
"learning_rate": 6.889687139181228e-05,
"loss": 2.1092,
"step": 570
},
{
"epoch": 9.51,
"learning_rate": 6.908569983907343e-05,
"loss": 2.129,
"step": 580
},
{
"epoch": 9.67,
"learning_rate": 6.92713002910536e-05,
"loss": 2.113,
"step": 590
},
{
"epoch": 9.84,
"learning_rate": 6.945378125959108e-05,
"loss": 2.1234,
"step": 600
},
{
"epoch": 10.0,
"learning_rate": 6.963324587526918e-05,
"loss": 2.1028,
"step": 610
},
{
"epoch": 10.16,
"learning_rate": 6.980979223745634e-05,
"loss": 2.0476,
"step": 620
},
{
"epoch": 10.33,
"learning_rate": 6.998351373633953e-05,
"loss": 2.0879,
"step": 630
},
{
"epoch": 10.49,
"learning_rate": 7.015449934959717e-05,
"loss": 2.0547,
"step": 640
},
{
"epoch": 10.66,
"learning_rate": 7.032283391607138e-05,
"loss": 2.0791,
"step": 650
},
{
"epoch": 10.82,
"learning_rate": 7.048859838854671e-05,
"loss": 2.1454,
"step": 660
},
{
"epoch": 10.98,
"learning_rate": 7.065187006752065e-05,
"loss": 2.0957,
"step": 670
},
{
"epoch": 11.15,
"learning_rate": 7.08127228176559e-05,
"loss": 2.0561,
"step": 680
},
{
"epoch": 11.31,
"learning_rate": 7.097122726843138e-05,
"loss": 2.0563,
"step": 690
},
{
"epoch": 11.48,
"learning_rate": 7.112745100035642e-05,
"loss": 2.027,
"step": 700
},
{
"epoch": 11.64,
"learning_rate": 7.128145871797688e-05,
"loss": 2.0495,
"step": 710
},
{
"epoch": 11.8,
"learning_rate": 7.143331241078171e-05,
"loss": 2.019,
"step": 720
},
{
"epoch": 11.97,
"learning_rate": 7.158307150301139e-05,
"loss": 2.0242,
"step": 730
},
{
"epoch": 12.13,
"learning_rate": 7.17307929932744e-05,
"loss": 1.9962,
"step": 740
},
{
"epoch": 12.3,
"learning_rate": 7.187653158479249e-05,
"loss": 1.9971,
"step": 750
},
{
"epoch": 12.46,
"learning_rate": 7.202033980701978e-05,
"loss": 2.0236,
"step": 760
},
{
"epoch": 12.62,
"learning_rate": 7.216226812931204e-05,
"loss": 1.9923,
"step": 770
},
{
"epoch": 12.79,
"learning_rate": 7.2302365067262e-05,
"loss": 2.0244,
"step": 780
},
{
"epoch": 12.95,
"learning_rate": 7.244067728226103e-05,
"loss": 1.9846,
"step": 790
},
{
"epoch": 13.11,
"learning_rate": 7.257724967479857e-05,
"loss": 1.9811,
"step": 800
},
{
"epoch": 13.28,
"learning_rate": 7.271212547196624e-05,
"loss": 1.9709,
"step": 810
},
{
"epoch": 13.44,
"learning_rate": 7.284534630959291e-05,
"loss": 1.9652,
"step": 820
},
{
"epoch": 13.61,
"learning_rate": 7.297695230940184e-05,
"loss": 1.9605,
"step": 830
},
{
"epoch": 13.77,
"learning_rate": 7.310698215154704e-05,
"loss": 1.9692,
"step": 840
},
{
"epoch": 13.93,
"learning_rate": 7.323547314285732e-05,
"loss": 1.9945,
"step": 850
},
{
"epoch": 14.1,
"learning_rate": 7.336246128108918e-05,
"loss": 1.9222,
"step": 860
},
{
"epoch": 14.26,
"learning_rate": 7.348798131546546e-05,
"loss": 1.9283,
"step": 870
},
{
"epoch": 14.43,
"learning_rate": 7.36120668037542e-05,
"loss": 1.9376,
"step": 880
},
{
"epoch": 14.59,
"learning_rate": 7.37347501661228e-05,
"loss": 1.9247,
"step": 890
},
{
"epoch": 14.75,
"learning_rate": 7.385606273598311e-05,
"loss": 1.9218,
"step": 900
},
{
"epoch": 14.92,
"learning_rate": 7.397603480802732e-05,
"loss": 1.9492,
"step": 910
},
{
"epoch": 15.08,
"learning_rate": 7.409469568363888e-05,
"loss": 1.9235,
"step": 920
},
{
"epoch": 15.25,
"learning_rate": 7.421207371384837e-05,
"loss": 1.8671,
"step": 930
},
{
"epoch": 15.41,
"learning_rate": 7.432819633999247e-05,
"loss": 1.909,
"step": 940
},
{
"epoch": 15.57,
"learning_rate": 7.444309013222118e-05,
"loss": 1.8568,
"step": 950
},
{
"epoch": 15.74,
"learning_rate": 7.45567808259892e-05,
"loss": 1.9199,
"step": 960
},
{
"epoch": 15.9,
"learning_rate": 7.46692933566561e-05,
"loss": 1.9247,
"step": 970
},
{
"epoch": 16.07,
"learning_rate": 7.478065189231236e-05,
"loss": 1.895,
"step": 980
},
{
"epoch": 16.23,
"learning_rate": 7.489087986493874e-05,
"loss": 1.8821,
"step": 990
},
{
"epoch": 16.39,
"learning_rate": 7.5e-05,
"loss": 1.8423,
"step": 1000
},
{
"epoch": 16.39,
"eval_loss": 2.925347328186035,
"eval_runtime": 13.9926,
"eval_samples_per_second": 46.524,
"eval_steps_per_second": 0.572,
"step": 1000
},
{
"epoch": 16.56,
"learning_rate": 7.510803434456605e-05,
"loss": 1.8519,
"step": 1010
},
{
"epoch": 16.72,
"learning_rate": 7.521500429404794e-05,
"loss": 1.8578,
"step": 1020
},
{
"epoch": 16.89,
"learning_rate": 7.532093061762931e-05,
"loss": 1.8676,
"step": 1030
},
{
"epoch": 17.05,
"learning_rate": 7.54258334824695e-05,
"loss": 1.8492,
"step": 1040
},
{
"epoch": 17.21,
"learning_rate": 7.552973247674843e-05,
"loss": 1.8542,
"step": 1050
},
{
"epoch": 17.38,
"learning_rate": 7.563264663161926e-05,
"loss": 1.8312,
"step": 1060
},
{
"epoch": 17.54,
"learning_rate": 7.573459444213023e-05,
"loss": 1.8554,
"step": 1070
},
{
"epoch": 17.7,
"learning_rate": 7.583559388717374e-05,
"loss": 1.8484,
"step": 1080
},
{
"epoch": 17.87,
"learning_rate": 7.593566244851558e-05,
"loss": 1.8485,
"step": 1090
},
{
"epoch": 18.03,
"learning_rate": 7.603481712895562e-05,
"loss": 1.8505,
"step": 1100
},
{
"epoch": 18.2,
"learning_rate": 7.613307446966643e-05,
"loss": 1.8163,
"step": 1110
},
{
"epoch": 18.36,
"learning_rate": 7.623045056675453e-05,
"loss": 1.8382,
"step": 1120
},
{
"epoch": 18.52,
"learning_rate": 7.632696108708549e-05,
"loss": 1.8251,
"step": 1130
},
{
"epoch": 18.69,
"learning_rate": 7.642262128341181e-05,
"loss": 1.8252,
"step": 1140
},
{
"epoch": 18.85,
"learning_rate": 7.651744600884029e-05,
"loss": 1.849,
"step": 1150
},
{
"epoch": 19.02,
"learning_rate": 7.661144973067295e-05,
"loss": 1.8202,
"step": 1160
},
{
"epoch": 19.18,
"learning_rate": 7.670464654365404e-05,
"loss": 1.8013,
"step": 1170
},
{
"epoch": 19.34,
"learning_rate": 7.679705018265312e-05,
"loss": 1.8149,
"step": 1180
},
{
"epoch": 19.51,
"learning_rate": 7.688867403481326e-05,
"loss": 1.7919,
"step": 1190
},
{
"epoch": 19.67,
"learning_rate": 7.697953115119061e-05,
"loss": 1.801,
"step": 1200
},
{
"epoch": 19.84,
"learning_rate": 7.706963425791124e-05,
"loss": 1.8286,
"step": 1210
},
{
"epoch": 20.0,
"learning_rate": 7.71589957668687e-05,
"loss": 1.7945,
"step": 1220
},
{
"epoch": 20.16,
"learning_rate": 7.724762778598493e-05,
"loss": 1.7619,
"step": 1230
},
{
"epoch": 20.33,
"learning_rate": 7.733554212905587e-05,
"loss": 1.7693,
"step": 1240
},
{
"epoch": 20.49,
"learning_rate": 7.74227503252014e-05,
"loss": 1.7689,
"step": 1250
},
{
"epoch": 20.66,
"learning_rate": 7.750926362793907e-05,
"loss": 1.77,
"step": 1260
},
{
"epoch": 20.82,
"learning_rate": 7.759509302389892e-05,
"loss": 1.7765,
"step": 1270
},
{
"epoch": 20.98,
"learning_rate": 7.768024924119671e-05,
"loss": 1.7791,
"step": 1280
},
{
"epoch": 21.15,
"learning_rate": 7.776474275748121e-05,
"loss": 1.7514,
"step": 1290
},
{
"epoch": 21.31,
"learning_rate": 7.784858380767091e-05,
"loss": 1.7564,
"step": 1300
},
{
"epoch": 21.48,
"learning_rate": 7.793178239139409e-05,
"loss": 1.7541,
"step": 1310
},
{
"epoch": 21.64,
"learning_rate": 7.801434828014625e-05,
"loss": 1.7519,
"step": 1320
},
{
"epoch": 21.8,
"learning_rate": 7.809629102417713e-05,
"loss": 1.7862,
"step": 1330
},
{
"epoch": 21.97,
"learning_rate": 7.817761995912018e-05,
"loss": 1.7724,
"step": 1340
},
{
"epoch": 22.13,
"learning_rate": 7.825834421237515e-05,
"loss": 1.7565,
"step": 1350
},
{
"epoch": 22.3,
"learning_rate": 7.833847270925543e-05,
"loss": 1.7346,
"step": 1360
},
{
"epoch": 22.46,
"learning_rate": 7.841801417891016e-05,
"loss": 1.7238,
"step": 1370
},
{
"epoch": 22.62,
"learning_rate": 7.84969771600309e-05,
"loss": 1.738,
"step": 1380
},
{
"epoch": 22.79,
"learning_rate": 7.857537000635237e-05,
"loss": 1.7446,
"step": 1390
},
{
"epoch": 22.95,
"learning_rate": 7.865320089195594e-05,
"loss": 1.7395,
"step": 1400
},
{
"epoch": 23.11,
"learning_rate": 7.87304778163845e-05,
"loss": 1.7533,
"step": 1410
},
{
"epoch": 23.28,
"learning_rate": 7.880720860957641e-05,
"loss": 1.7101,
"step": 1420
},
{
"epoch": 23.44,
"learning_rate": 7.888340093662653e-05,
"loss": 1.7145,
"step": 1430
},
{
"epoch": 23.61,
"learning_rate": 7.895906230238123e-05,
"loss": 1.7496,
"step": 1440
},
{
"epoch": 23.77,
"learning_rate": 7.903420005587436e-05,
"loss": 1.7416,
"step": 1450
},
{
"epoch": 23.93,
"learning_rate": 7.910882139461093e-05,
"loss": 1.7315,
"step": 1460
},
{
"epoch": 24.1,
"learning_rate": 7.918293336870439e-05,
"loss": 1.7224,
"step": 1470
},
{
"epoch": 24.26,
"learning_rate": 7.925654288487392e-05,
"loss": 1.716,
"step": 1480
},
{
"epoch": 24.43,
"learning_rate": 7.932965671030685e-05,
"loss": 1.704,
"step": 1490
},
{
"epoch": 24.59,
"learning_rate": 7.940228147639202e-05,
"loss": 1.6873,
"step": 1500
},
{
"epoch": 24.59,
"eval_loss": 3.106438159942627,
"eval_runtime": 13.9459,
"eval_samples_per_second": 46.681,
"eval_steps_per_second": 0.574,
"step": 1500
},
{
"epoch": 24.75,
"learning_rate": 7.947442368232923e-05,
"loss": 1.7098,
"step": 1510
},
{
"epoch": 24.92,
"learning_rate": 7.954608969861931e-05,
"loss": 1.7058,
"step": 1520
},
{
"epoch": 25.08,
"learning_rate": 7.961728577043997e-05,
"loss": 1.7189,
"step": 1530
},
{
"epoch": 25.25,
"learning_rate": 7.968801802091157e-05,
"loss": 1.6689,
"step": 1540
},
{
"epoch": 25.41,
"learning_rate": 7.975829245425728e-05,
"loss": 1.709,
"step": 1550
},
{
"epoch": 25.57,
"learning_rate": 7.982811495886153e-05,
"loss": 1.6881,
"step": 1560
},
{
"epoch": 25.74,
"learning_rate": 7.989749131023083e-05,
"loss": 1.7032,
"step": 1570
},
{
"epoch": 25.9,
"learning_rate": 7.996642717386056e-05,
"loss": 1.6887,
"step": 1580
},
{
"epoch": 26.07,
"learning_rate": 8.003492810801127e-05,
"loss": 1.6961,
"step": 1590
},
{
"epoch": 26.23,
"learning_rate": 8.01029995663981e-05,
"loss": 1.6701,
"step": 1600
},
{
"epoch": 26.39,
"learning_rate": 8.017064690079624e-05,
"loss": 1.69,
"step": 1610
},
{
"epoch": 26.56,
"learning_rate": 8.023787536356576e-05,
"loss": 1.7125,
"step": 1620
},
{
"epoch": 26.72,
"learning_rate": 8.030469011009893e-05,
"loss": 1.6606,
"step": 1630
},
{
"epoch": 26.89,
"learning_rate": 8.037109620119243e-05,
"loss": 1.6649,
"step": 1640
},
{
"epoch": 27.05,
"learning_rate": 8.043709860534764e-05,
"loss": 1.6699,
"step": 1650
},
{
"epoch": 27.21,
"learning_rate": 8.050270220100136e-05,
"loss": 1.645,
"step": 1660
},
{
"epoch": 27.38,
"learning_rate": 8.056791177868957e-05,
"loss": 1.65,
"step": 1670
},
{
"epoch": 27.54,
"learning_rate": 8.063273204314657e-05,
"loss": 1.6552,
"step": 1680
},
{
"epoch": 27.7,
"learning_rate": 8.069716761534183e-05,
"loss": 1.6772,
"step": 1690
},
{
"epoch": 27.87,
"learning_rate": 8.076122303445684e-05,
"loss": 1.6664,
"step": 1700
},
{
"epoch": 28.03,
"learning_rate": 8.082490275980384e-05,
"loss": 1.6539,
"step": 1710
},
{
"epoch": 28.2,
"learning_rate": 8.088821117268871e-05,
"loss": 1.6616,
"step": 1720
},
{
"epoch": 28.36,
"learning_rate": 8.095115257821987e-05,
"loss": 1.6379,
"step": 1730
},
{
"epoch": 28.52,
"learning_rate": 8.1013731207065e-05,
"loss": 1.6536,
"step": 1740
},
{
"epoch": 28.69,
"learning_rate": 8.107595121715735e-05,
"loss": 1.6506,
"step": 1750
},
{
"epoch": 28.85,
"learning_rate": 8.113781669535373e-05,
"loss": 1.66,
"step": 1760
},
{
"epoch": 29.02,
"learning_rate": 8.119933165904515e-05,
"loss": 1.6548,
"step": 1770
},
{
"epoch": 29.18,
"learning_rate": 8.126050005772234e-05,
"loss": 1.6408,
"step": 1780
},
{
"epoch": 29.34,
"learning_rate": 8.132132577449732e-05,
"loss": 1.6533,
"step": 1790
},
{
"epoch": 29.51,
"learning_rate": 8.138181262758264e-05,
"loss": 1.6508,
"step": 1800
},
{
"epoch": 29.67,
"learning_rate": 8.144196437172959e-05,
"loss": 1.6302,
"step": 1810
},
{
"epoch": 29.84,
"learning_rate": 8.150178469962686e-05,
"loss": 1.6319,
"step": 1820
},
{
"epoch": 30.0,
"learning_rate": 8.156127724326073e-05,
"loss": 1.623,
"step": 1830
},
{
"epoch": 30.16,
"learning_rate": 8.16204455752384e-05,
"loss": 1.6299,
"step": 1840
},
{
"epoch": 30.33,
"learning_rate": 8.167929321007533e-05,
"loss": 1.6187,
"step": 1850
},
{
"epoch": 30.49,
"learning_rate": 8.17378236054479e-05,
"loss": 1.6138,
"step": 1860
},
{
"epoch": 30.66,
"learning_rate": 8.179604016341247e-05,
"loss": 1.6418,
"step": 1870
},
{
"epoch": 30.82,
"learning_rate": 8.1853946231592e-05,
"loss": 1.6433,
"step": 1880
},
{
"epoch": 30.98,
"learning_rate": 8.19115451043311e-05,
"loss": 1.6416,
"step": 1890
},
{
"epoch": 31.15,
"learning_rate": 8.196884002382071e-05,
"loss": 1.6244,
"step": 1900
},
{
"epoch": 31.31,
"learning_rate": 8.202583418119318e-05,
"loss": 1.6141,
"step": 1910
},
{
"epoch": 31.48,
"learning_rate": 8.208253071758874e-05,
"loss": 1.6033,
"step": 1920
},
{
"epoch": 31.64,
"learning_rate": 8.213893272519434e-05,
"loss": 1.6287,
"step": 1930
},
{
"epoch": 31.8,
"learning_rate": 8.219504324825564e-05,
"loss": 1.6268,
"step": 1940
},
{
"epoch": 31.97,
"learning_rate": 8.225086528406294e-05,
"loss": 1.6297,
"step": 1950
},
{
"epoch": 32.13,
"learning_rate": 8.23064017839119e-05,
"loss": 1.6012,
"step": 1960
},
{
"epoch": 32.3,
"learning_rate": 8.236165565403982e-05,
"loss": 1.6203,
"step": 1970
},
{
"epoch": 32.46,
"learning_rate": 8.241662975653826e-05,
"loss": 1.6107,
"step": 1980
},
{
"epoch": 32.62,
"learning_rate": 8.247132691024267e-05,
"loss": 1.6107,
"step": 1990
},
{
"epoch": 32.79,
"learning_rate": 8.252574989159953e-05,
"loss": 1.5886,
"step": 2000
},
{
"epoch": 32.79,
"eval_loss": 3.1871249675750732,
"eval_runtime": 13.1185,
"eval_samples_per_second": 49.624,
"eval_steps_per_second": 0.61,
"step": 2000
}
],
"max_steps": 100000,
"num_train_epochs": 1640,
"total_flos": 394015948800.0,
"trial_name": null,
"trial_params": null
}
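
For anyone wanting to inspect this log programmatically, below is a minimal sketch of how the log_history entries above can be loaded and plotted. It assumes Python 3 with matplotlib installed and the file saved locally as trainer_state.json; the path and plotting choices are illustrative assumptions, not part of this repository's tooling.

import json

import matplotlib.pyplot as plt

# Assumed local path; adjust to wherever this trainer_state.json is saved.
with open("trainer_state.json") as f:
    state = json.load(f)

log = state["log_history"]

# Training entries carry "loss"; evaluation entries carry "eval_loss".
train_steps = [e["step"] for e in log if "loss" in e]
train_loss = [e["loss"] for e in log if "loss" in e]
eval_steps = [e["step"] for e in log if "eval_loss" in e]
eval_loss = [e["eval_loss"] for e in log if "eval_loss" in e]

plt.plot(train_steps, train_loss, label="train loss")
plt.plot(eval_steps, eval_loss, marker="o", label="eval loss")
plt.xlabel("global step")
plt.ylabel("loss")
plt.legend()
plt.show()

Run as a plain script in the same directory as the JSON file; it shows the training loss decreasing while the eval_loss logged every 500 steps rises, as recorded above.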