{
"best_metric": 0.8250355618776671,
"best_model_checkpoint": "MRR_image_classification_dit_29_jan-finetuned-eurosat/checkpoint-525",
"epoch": 2.987197724039829,
"eval_steps": 500,
"global_step": 525,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06,
"learning_rate": 9.433962264150944e-06,
"loss": 2.0011,
"step": 10
},
{
"epoch": 0.11,
"learning_rate": 1.8867924528301888e-05,
"loss": 1.411,
"step": 20
},
{
"epoch": 0.17,
"learning_rate": 2.830188679245283e-05,
"loss": 1.2419,
"step": 30
},
{
"epoch": 0.23,
"learning_rate": 3.7735849056603776e-05,
"loss": 1.24,
"step": 40
},
{
"epoch": 0.28,
"learning_rate": 4.716981132075472e-05,
"loss": 1.1031,
"step": 50
},
{
"epoch": 0.34,
"learning_rate": 4.925847457627119e-05,
"loss": 1.0103,
"step": 60
},
{
"epoch": 0.4,
"learning_rate": 4.819915254237288e-05,
"loss": 1.1038,
"step": 70
},
{
"epoch": 0.46,
"learning_rate": 4.7139830508474584e-05,
"loss": 1.1555,
"step": 80
},
{
"epoch": 0.51,
"learning_rate": 4.608050847457627e-05,
"loss": 1.0994,
"step": 90
},
{
"epoch": 0.57,
"learning_rate": 4.502118644067797e-05,
"loss": 1.1303,
"step": 100
},
{
"epoch": 0.63,
"learning_rate": 4.396186440677966e-05,
"loss": 1.0542,
"step": 110
},
{
"epoch": 0.68,
"learning_rate": 4.290254237288136e-05,
"loss": 1.0717,
"step": 120
},
{
"epoch": 0.74,
"learning_rate": 4.1843220338983054e-05,
"loss": 1.0289,
"step": 130
},
{
"epoch": 0.8,
"learning_rate": 4.078389830508475e-05,
"loss": 0.9717,
"step": 140
},
{
"epoch": 0.85,
"learning_rate": 3.9724576271186445e-05,
"loss": 0.9926,
"step": 150
},
{
"epoch": 0.91,
"learning_rate": 3.866525423728814e-05,
"loss": 0.9859,
"step": 160
},
{
"epoch": 0.97,
"learning_rate": 3.7605932203389835e-05,
"loss": 1.0588,
"step": 170
},
{
"epoch": 1.0,
"eval_accuracy": 0.6621621621621622,
"eval_loss": 0.8930806517601013,
"eval_runtime": 59.7101,
"eval_samples_per_second": 23.547,
"eval_steps_per_second": 2.948,
"step": 175
},
{
"epoch": 1.02,
"learning_rate": 3.654661016949153e-05,
"loss": 0.9578,
"step": 180
},
{
"epoch": 1.08,
"learning_rate": 3.548728813559322e-05,
"loss": 0.9455,
"step": 190
},
{
"epoch": 1.14,
"learning_rate": 3.442796610169492e-05,
"loss": 0.9959,
"step": 200
},
{
"epoch": 1.19,
"learning_rate": 3.336864406779661e-05,
"loss": 0.9689,
"step": 210
},
{
"epoch": 1.25,
"learning_rate": 3.230932203389831e-05,
"loss": 0.9743,
"step": 220
},
{
"epoch": 1.31,
"learning_rate": 3.125e-05,
"loss": 0.8971,
"step": 230
},
{
"epoch": 1.37,
"learning_rate": 3.0190677966101693e-05,
"loss": 0.8917,
"step": 240
},
{
"epoch": 1.42,
"learning_rate": 2.913135593220339e-05,
"loss": 0.7242,
"step": 250
},
{
"epoch": 1.48,
"learning_rate": 2.8072033898305083e-05,
"loss": 0.9562,
"step": 260
},
{
"epoch": 1.54,
"learning_rate": 2.7012711864406782e-05,
"loss": 0.8342,
"step": 270
},
{
"epoch": 1.59,
"learning_rate": 2.5953389830508474e-05,
"loss": 0.8166,
"step": 280
},
{
"epoch": 1.65,
"learning_rate": 2.489406779661017e-05,
"loss": 0.829,
"step": 290
},
{
"epoch": 1.71,
"learning_rate": 2.3834745762711865e-05,
"loss": 0.7821,
"step": 300
},
{
"epoch": 1.76,
"learning_rate": 2.277542372881356e-05,
"loss": 0.8847,
"step": 310
},
{
"epoch": 1.82,
"learning_rate": 2.1716101694915255e-05,
"loss": 0.7451,
"step": 320
},
{
"epoch": 1.88,
"learning_rate": 2.065677966101695e-05,
"loss": 0.7431,
"step": 330
},
{
"epoch": 1.93,
"learning_rate": 1.9597457627118646e-05,
"loss": 0.7861,
"step": 340
},
{
"epoch": 1.99,
"learning_rate": 1.853813559322034e-05,
"loss": 0.7206,
"step": 350
},
{
"epoch": 2.0,
"eval_accuracy": 0.7773826458036984,
"eval_loss": 0.6266400218009949,
"eval_runtime": 57.5992,
"eval_samples_per_second": 24.41,
"eval_steps_per_second": 3.056,
"step": 351
},
{
"epoch": 2.05,
"learning_rate": 1.7478813559322037e-05,
"loss": 0.6948,
"step": 360
},
{
"epoch": 2.11,
"learning_rate": 1.641949152542373e-05,
"loss": 0.7623,
"step": 370
},
{
"epoch": 2.16,
"learning_rate": 1.5360169491525424e-05,
"loss": 0.6679,
"step": 380
},
{
"epoch": 2.22,
"learning_rate": 1.430084745762712e-05,
"loss": 0.623,
"step": 390
},
{
"epoch": 2.28,
"learning_rate": 1.3241525423728815e-05,
"loss": 0.7125,
"step": 400
},
{
"epoch": 2.33,
"learning_rate": 1.2182203389830509e-05,
"loss": 0.8716,
"step": 410
},
{
"epoch": 2.39,
"learning_rate": 1.1122881355932204e-05,
"loss": 0.7073,
"step": 420
},
{
"epoch": 2.45,
"learning_rate": 1.0063559322033898e-05,
"loss": 0.722,
"step": 430
},
{
"epoch": 2.5,
"learning_rate": 9.004237288135593e-06,
"loss": 0.733,
"step": 440
},
{
"epoch": 2.56,
"learning_rate": 7.944915254237288e-06,
"loss": 0.6861,
"step": 450
},
{
"epoch": 2.62,
"learning_rate": 6.885593220338983e-06,
"loss": 0.6785,
"step": 460
},
{
"epoch": 2.67,
"learning_rate": 5.826271186440678e-06,
"loss": 0.71,
"step": 470
},
{
"epoch": 2.73,
"learning_rate": 4.766949152542373e-06,
"loss": 0.6091,
"step": 480
},
{
"epoch": 2.79,
"learning_rate": 3.707627118644068e-06,
"loss": 0.754,
"step": 490
},
{
"epoch": 2.84,
"learning_rate": 2.648305084745763e-06,
"loss": 0.6406,
"step": 500
},
{
"epoch": 2.9,
"learning_rate": 1.5889830508474576e-06,
"loss": 0.6826,
"step": 510
},
{
"epoch": 2.96,
"learning_rate": 5.296610169491525e-07,
"loss": 0.6833,
"step": 520
},
{
"epoch": 2.99,
"eval_accuracy": 0.8250355618776671,
"eval_loss": 0.4995260536670685,
"eval_runtime": 58.3941,
"eval_samples_per_second": 24.078,
"eval_steps_per_second": 3.014,
"step": 525
},
{
"epoch": 2.99,
"step": 525,
"total_flos": 4.6009321944033853e+18,
"train_loss": 0.9024475538162958,
"train_runtime": 1330.277,
"train_samples_per_second": 12.676,
"train_steps_per_second": 0.395
}
],
"logging_steps": 10,
"max_steps": 525,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"total_flos": 4.6009321944033853e+18,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}