{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 117,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.042735042735042736,
"grad_norm": 3.711402177810669,
"learning_rate": 3.6363636363636366e-06,
"loss": 0.2434,
"step": 5
},
{
"epoch": 0.08547008547008547,
"grad_norm": 3.794971227645874,
"learning_rate": 8.181818181818183e-06,
"loss": 0.1924,
"step": 10
},
{
"epoch": 0.1282051282051282,
"grad_norm": 2.4459402561187744,
"learning_rate": 9.998079135987437e-06,
"loss": 0.174,
"step": 15
},
{
"epoch": 0.17094017094017094,
"grad_norm": 2.0912649631500244,
"learning_rate": 9.98634586692894e-06,
"loss": 0.1488,
"step": 20
},
{
"epoch": 0.21367521367521367,
"grad_norm": 1.9830565452575684,
"learning_rate": 9.963971484502247e-06,
"loss": 0.1332,
"step": 25
},
{
"epoch": 0.2564102564102564,
"grad_norm": 1.4833670854568481,
"learning_rate": 9.931003736767013e-06,
"loss": 0.1146,
"step": 30
},
{
"epoch": 0.29914529914529914,
"grad_norm": 1.437637448310852,
"learning_rate": 9.887512978558329e-06,
"loss": 0.1175,
"step": 35
},
{
"epoch": 0.3418803418803419,
"grad_norm": 1.591968297958374,
"learning_rate": 9.833592021345938e-06,
"loss": 0.1114,
"step": 40
},
{
"epoch": 0.38461538461538464,
"grad_norm": 1.383669376373291,
"learning_rate": 9.76935593516989e-06,
"loss": 0.1092,
"step": 45
},
{
"epoch": 0.42735042735042733,
"grad_norm": 1.2138135433197021,
"learning_rate": 9.694941803075285e-06,
"loss": 0.1005,
"step": 50
},
{
"epoch": 0.4700854700854701,
"grad_norm": 1.317816138267517,
"learning_rate": 9.610508428570122e-06,
"loss": 0.1079,
"step": 55
},
{
"epoch": 0.5128205128205128,
"grad_norm": 1.1774816513061523,
"learning_rate": 9.516235996730645e-06,
"loss": 0.1003,
"step": 60
},
{
"epoch": 0.5555555555555556,
"grad_norm": 1.3384287357330322,
"learning_rate": 9.41232568967728e-06,
"loss": 0.1005,
"step": 65
},
{
"epoch": 0.5982905982905983,
"grad_norm": 1.0372443199157715,
"learning_rate": 9.298999257241862e-06,
"loss": 0.1029,
"step": 70
},
{
"epoch": 0.6410256410256411,
"grad_norm": 1.1703890562057495,
"learning_rate": 9.176498543742328e-06,
"loss": 0.0988,
"step": 75
},
{
"epoch": 0.6837606837606838,
"grad_norm": 1.1787683963775635,
"learning_rate": 9.045084971874738e-06,
"loss": 0.0936,
"step": 80
},
{
"epoch": 0.7264957264957265,
"grad_norm": 1.180239200592041,
"learning_rate": 8.905038984824079e-06,
"loss": 0.1036,
"step": 85
},
{
"epoch": 0.7692307692307693,
"grad_norm": 1.1398251056671143,
"learning_rate": 8.756659447784367e-06,
"loss": 0.1,
"step": 90
},
{
"epoch": 0.811965811965812,
"grad_norm": 1.2935456037521362,
"learning_rate": 8.600263010165275e-06,
"loss": 0.0956,
"step": 95
},
{
"epoch": 0.8547008547008547,
"grad_norm": 1.1012020111083984,
"learning_rate": 8.436183429846314e-06,
"loss": 0.1028,
"step": 100
},
{
"epoch": 0.8974358974358975,
"grad_norm": 1.0556975603103638,
"learning_rate": 8.264770860920722e-06,
"loss": 0.1077,
"step": 105
},
{
"epoch": 0.9401709401709402,
"grad_norm": 1.069830298423767,
"learning_rate": 8.086391106448965e-06,
"loss": 0.0976,
"step": 110
},
{
"epoch": 0.9829059829059829,
"grad_norm": 1.019631028175354,
"learning_rate": 7.90142483781658e-06,
"loss": 0.1008,
"step": 115
}
],
"logging_steps": 5,
"max_steps": 351,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.139253540605133e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}