{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.663900414937759,
"eval_steps": 500,
"global_step": 80,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03319502074688797,
"grad_norm": 8.615787895890602,
"learning_rate": 0.0,
"loss": 1.3636,
"step": 1
},
{
"epoch": 0.06639004149377593,
"grad_norm": 8.559176870090305,
"learning_rate": 2.666666666666667e-06,
"loss": 1.39,
"step": 2
},
{
"epoch": 0.0995850622406639,
"grad_norm": 7.763393937169438,
"learning_rate": 5.333333333333334e-06,
"loss": 1.3674,
"step": 3
},
{
"epoch": 0.13278008298755187,
"grad_norm": 3.1671709519080324,
"learning_rate": 8.000000000000001e-06,
"loss": 1.2173,
"step": 4
},
{
"epoch": 0.16597510373443983,
"grad_norm": 3.04503337779613,
"learning_rate": 1.0666666666666667e-05,
"loss": 1.0941,
"step": 5
},
{
"epoch": 0.1991701244813278,
"grad_norm": 2.989964302933358,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.1226,
"step": 6
},
{
"epoch": 0.23236514522821577,
"grad_norm": 3.459388997412336,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.0734,
"step": 7
},
{
"epoch": 0.26556016597510373,
"grad_norm": 2.533839407605375,
"learning_rate": 1.866666666666667e-05,
"loss": 1.0045,
"step": 8
},
{
"epoch": 0.2987551867219917,
"grad_norm": 2.8224948362089712,
"learning_rate": 2.1333333333333335e-05,
"loss": 0.9903,
"step": 9
},
{
"epoch": 0.33195020746887965,
"grad_norm": 2.1132471970550197,
"learning_rate": 2.4e-05,
"loss": 0.9568,
"step": 10
},
{
"epoch": 0.3651452282157676,
"grad_norm": 1.8276852389668479,
"learning_rate": 2.6666666666666667e-05,
"loss": 0.945,
"step": 11
},
{
"epoch": 0.3983402489626556,
"grad_norm": 1.9070208027494653,
"learning_rate": 2.9333333333333333e-05,
"loss": 0.8958,
"step": 12
},
{
"epoch": 0.4315352697095436,
"grad_norm": 1.5086701253320605,
"learning_rate": 3.2000000000000005e-05,
"loss": 0.8932,
"step": 13
},
{
"epoch": 0.46473029045643155,
"grad_norm": 1.6692252959799796,
"learning_rate": 3.466666666666667e-05,
"loss": 0.8899,
"step": 14
},
{
"epoch": 0.4979253112033195,
"grad_norm": 1.5543883507537406,
"learning_rate": 3.733333333333334e-05,
"loss": 0.8962,
"step": 15
},
{
"epoch": 0.5311203319502075,
"grad_norm": 1.5207028288253677,
"learning_rate": 4e-05,
"loss": 0.8958,
"step": 16
},
{
"epoch": 0.5643153526970954,
"grad_norm": 1.3772276740689315,
"learning_rate": 3.999458482358924e-05,
"loss": 0.8678,
"step": 17
},
{
"epoch": 0.5975103734439834,
"grad_norm": 1.2350882249364952,
"learning_rate": 3.99783422267705e-05,
"loss": 0.8698,
"step": 18
},
{
"epoch": 0.6307053941908713,
"grad_norm": 1.5168399267769646,
"learning_rate": 3.9951281005196486e-05,
"loss": 0.8467,
"step": 19
},
{
"epoch": 0.6639004149377593,
"grad_norm": 0.9947073114646282,
"learning_rate": 3.991341581299609e-05,
"loss": 0.851,
"step": 20
},
{
"epoch": 0.6970954356846473,
"grad_norm": 1.3721835937192417,
"learning_rate": 3.9864767154838864e-05,
"loss": 0.8212,
"step": 21
},
{
"epoch": 0.7302904564315352,
"grad_norm": 1.1421321822572974,
"learning_rate": 3.980536137483141e-05,
"loss": 0.8349,
"step": 22
},
{
"epoch": 0.7634854771784232,
"grad_norm": 1.158024116310333,
"learning_rate": 3.973523064225159e-05,
"loss": 0.8199,
"step": 23
},
{
"epoch": 0.7966804979253111,
"grad_norm": 0.8300618356114078,
"learning_rate": 3.965441293412827e-05,
"loss": 0.8362,
"step": 24
},
{
"epoch": 0.8298755186721992,
"grad_norm": 0.9468275757503992,
"learning_rate": 3.9562952014676116e-05,
"loss": 0.8168,
"step": 25
},
{
"epoch": 0.8630705394190872,
"grad_norm": 0.9398559845087756,
"learning_rate": 3.946089741159648e-05,
"loss": 0.8171,
"step": 26
},
{
"epoch": 0.8962655601659751,
"grad_norm": 0.9485219729922953,
"learning_rate": 3.934830438925728e-05,
"loss": 0.8012,
"step": 27
},
{
"epoch": 0.9294605809128631,
"grad_norm": 1.0308850725665082,
"learning_rate": 3.922523391876638e-05,
"loss": 0.7854,
"step": 28
},
{
"epoch": 0.9626556016597511,
"grad_norm": 1.20395138490915,
"learning_rate": 3.909175264495464e-05,
"loss": 0.8047,
"step": 29
},
{
"epoch": 0.995850622406639,
"grad_norm": 0.8448920980747194,
"learning_rate": 3.8947932850286585e-05,
"loss": 0.7854,
"step": 30
},
{
"epoch": 1.033195020746888,
"grad_norm": 2.245209172954801,
"learning_rate": 3.879385241571817e-05,
"loss": 1.5149,
"step": 31
},
{
"epoch": 1.066390041493776,
"grad_norm": 0.9134845133034926,
"learning_rate": 3.862959477852285e-05,
"loss": 0.7548,
"step": 32
},
{
"epoch": 1.099585062240664,
"grad_norm": 0.9930546625802095,
"learning_rate": 3.845524888710885e-05,
"loss": 0.759,
"step": 33
},
{
"epoch": 1.1327800829875518,
"grad_norm": 0.77588901645867,
"learning_rate": 3.827090915285202e-05,
"loss": 0.7341,
"step": 34
},
{
"epoch": 1.16597510373444,
"grad_norm": 0.6668617646330832,
"learning_rate": 3.807667539897041e-05,
"loss": 0.7416,
"step": 35
},
{
"epoch": 1.1991701244813278,
"grad_norm": 0.78348508679161,
"learning_rate": 3.787265280646825e-05,
"loss": 0.7404,
"step": 36
},
{
"epoch": 1.2323651452282158,
"grad_norm": 1.064645095474325,
"learning_rate": 3.7658951857178544e-05,
"loss": 0.7479,
"step": 37
},
{
"epoch": 1.2655601659751037,
"grad_norm": 0.7188618680855522,
"learning_rate": 3.743568827393525e-05,
"loss": 0.7191,
"step": 38
},
{
"epoch": 1.2987551867219918,
"grad_norm": 0.8332631243928624,
"learning_rate": 3.720298295790732e-05,
"loss": 0.7132,
"step": 39
},
{
"epoch": 1.3319502074688796,
"grad_norm": 0.7102694689937566,
"learning_rate": 3.696096192312852e-05,
"loss": 0.7087,
"step": 40
},
{
"epoch": 1.3651452282157677,
"grad_norm": 0.6710612259560924,
"learning_rate": 3.6709756228258735e-05,
"loss": 0.7257,
"step": 41
},
{
"epoch": 1.3983402489626555,
"grad_norm": 0.6935377627613012,
"learning_rate": 3.644950190561325e-05,
"loss": 0.732,
"step": 42
},
{
"epoch": 1.4315352697095436,
"grad_norm": 0.6345437504230033,
"learning_rate": 3.6180339887498953e-05,
"loss": 0.7285,
"step": 43
},
{
"epoch": 1.4647302904564317,
"grad_norm": 0.569179877257485,
"learning_rate": 3.590241592989696e-05,
"loss": 0.7272,
"step": 44
},
{
"epoch": 1.4979253112033195,
"grad_norm": 0.5638090957495433,
"learning_rate": 3.561588053353319e-05,
"loss": 0.724,
"step": 45
},
{
"epoch": 1.5311203319502074,
"grad_norm": 0.6847005696091287,
"learning_rate": 3.532088886237956e-05,
"loss": 0.7303,
"step": 46
},
{
"epoch": 1.5643153526970954,
"grad_norm": 0.7137303594813845,
"learning_rate": 3.5017600659629986e-05,
"loss": 0.7226,
"step": 47
},
{
"epoch": 1.5975103734439835,
"grad_norm": 0.6692629863007482,
"learning_rate": 3.470618016119658e-05,
"loss": 0.7317,
"step": 48
},
{
"epoch": 1.6307053941908713,
"grad_norm": 0.6130719713014338,
"learning_rate": 3.438679600677303e-05,
"loss": 0.7135,
"step": 49
},
{
"epoch": 1.6639004149377592,
"grad_norm": 0.7269780774769957,
"learning_rate": 3.405962114851324e-05,
"loss": 0.7069,
"step": 50
},
{
"epoch": 1.6970954356846473,
"grad_norm": 0.7675385685925549,
"learning_rate": 3.372483275737468e-05,
"loss": 0.6947,
"step": 51
},
{
"epoch": 1.7302904564315353,
"grad_norm": 0.5557654669776614,
"learning_rate": 3.3382612127177166e-05,
"loss": 0.7114,
"step": 52
},
{
"epoch": 1.7634854771784232,
"grad_norm": 0.843893721004563,
"learning_rate": 3.303314457642911e-05,
"loss": 0.6951,
"step": 53
},
{
"epoch": 1.796680497925311,
"grad_norm": 0.7044204542667526,
"learning_rate": 3.26766193479742e-05,
"loss": 0.7102,
"step": 54
},
{
"epoch": 1.8298755186721993,
"grad_norm": 0.5932946307804874,
"learning_rate": 3.2313229506513167e-05,
"loss": 0.6879,
"step": 55
},
{
"epoch": 1.8630705394190872,
"grad_norm": 0.5180970490499838,
"learning_rate": 3.194317183405573e-05,
"loss": 0.694,
"step": 56
},
{
"epoch": 1.896265560165975,
"grad_norm": 0.7223180987342492,
"learning_rate": 3.156664672335973e-05,
"loss": 0.7043,
"step": 57
},
{
"epoch": 1.929460580912863,
"grad_norm": 0.6156946074167337,
"learning_rate": 3.1183858069414936e-05,
"loss": 0.7018,
"step": 58
},
{
"epoch": 1.9626556016597512,
"grad_norm": 0.6661521028388685,
"learning_rate": 3.079501315903026e-05,
"loss": 0.713,
"step": 59
},
{
"epoch": 1.995850622406639,
"grad_norm": 0.57891277679672,
"learning_rate": 3.0400322558584308e-05,
"loss": 0.7126,
"step": 60
},
{
"epoch": 2.033195020746888,
"grad_norm": 2.0063313681311743,
"learning_rate": 3.0000000000000004e-05,
"loss": 1.3315,
"step": 61
},
{
"epoch": 2.066390041493776,
"grad_norm": 0.7924238362204284,
"learning_rate": 2.959426226500493e-05,
"loss": 0.6405,
"step": 62
},
{
"epoch": 2.099585062240664,
"grad_norm": 0.8652584407517231,
"learning_rate": 2.9183329067740235e-05,
"loss": 0.6286,
"step": 63
},
{
"epoch": 2.132780082987552,
"grad_norm": 0.717303390179401,
"learning_rate": 2.876742293578155e-05,
"loss": 0.6204,
"step": 64
},
{
"epoch": 2.1659751037344397,
"grad_norm": 0.9109192659165971,
"learning_rate": 2.834676908963636e-05,
"loss": 0.6279,
"step": 65
},
{
"epoch": 2.199170124481328,
"grad_norm": 0.8861815091608722,
"learning_rate": 2.792159532078314e-05,
"loss": 0.6249,
"step": 66
},
{
"epoch": 2.232365145228216,
"grad_norm": 0.7504327505581353,
"learning_rate": 2.7492131868318247e-05,
"loss": 0.6296,
"step": 67
},
{
"epoch": 2.2655601659751037,
"grad_norm": 0.7468142972930245,
"learning_rate": 2.7058611294277378e-05,
"loss": 0.6328,
"step": 68
},
{
"epoch": 2.2987551867219915,
"grad_norm": 0.782870677435499,
"learning_rate": 2.6621268357699165e-05,
"loss": 0.6181,
"step": 69
},
{
"epoch": 2.33195020746888,
"grad_norm": 0.5486446333222698,
"learning_rate": 2.618033988749895e-05,
"loss": 0.6202,
"step": 70
},
{
"epoch": 2.3651452282157677,
"grad_norm": 0.6140586451457445,
"learning_rate": 2.5736064654221808e-05,
"loss": 0.6123,
"step": 71
},
{
"epoch": 2.3983402489626555,
"grad_norm": 0.5685554543391838,
"learning_rate": 2.528868324074405e-05,
"loss": 0.6197,
"step": 72
},
{
"epoch": 2.431535269709544,
"grad_norm": 0.5711825546726801,
"learning_rate": 2.4838437911993355e-05,
"loss": 0.6256,
"step": 73
},
{
"epoch": 2.4647302904564317,
"grad_norm": 0.7020688481999959,
"learning_rate": 2.4385572483758066e-05,
"loss": 0.6086,
"step": 74
},
{
"epoch": 2.4979253112033195,
"grad_norm": 0.5087577150668112,
"learning_rate": 2.3930332190656604e-05,
"loss": 0.611,
"step": 75
},
{
"epoch": 2.5311203319502074,
"grad_norm": 0.6537825147166224,
"learning_rate": 2.3472963553338614e-05,
"loss": 0.6193,
"step": 76
},
{
"epoch": 2.564315352697095,
"grad_norm": 0.45499946823109266,
"learning_rate": 2.3013714244989665e-05,
"loss": 0.6071,
"step": 77
},
{
"epoch": 2.5975103734439835,
"grad_norm": 0.6174423025200944,
"learning_rate": 2.25528329572119e-05,
"loss": 0.617,
"step": 78
},
{
"epoch": 2.6307053941908713,
"grad_norm": 0.4778613015683359,
"learning_rate": 2.209056926535307e-05,
"loss": 0.6067,
"step": 79
},
{
"epoch": 2.663900414937759,
"grad_norm": 0.556533197381377,
"learning_rate": 2.1627173493357167e-05,
"loss": 0.6044,
"step": 80
}
],
"logging_steps": 1,
"max_steps": 150,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 10,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.466139400429896e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}