{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 520,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.24096385542168675,
      "grad_norm": 0.09481784701347351,
      "learning_rate": 0.0005109898816807756,
      "loss": 1.2173,
      "mean_token_accuracy": 0.7412426620721817,
      "num_tokens": 819200.0,
      "step": 25
    },
    {
      "epoch": 0.4819277108433735,
      "grad_norm": 0.12040279060602188,
      "learning_rate": 0.0005091581413578166,
      "loss": 0.4254,
      "mean_token_accuracy": 0.8813771998882294,
      "num_tokens": 1638400.0,
      "step": 50
    },
    {
      "epoch": 0.7228915662650602,
      "grad_norm": 0.1103241890668869,
      "learning_rate": 0.0005041904358813742,
      "loss": 0.1818,
      "mean_token_accuracy": 0.9474865478277207,
      "num_tokens": 2457600.0,
      "step": 75
    },
    {
      "epoch": 0.963855421686747,
      "grad_norm": 0.1028311550617218,
      "learning_rate": 0.000496148180789635,
      "loss": 0.0932,
      "mean_token_accuracy": 0.9730724042654038,
      "num_tokens": 3276800.0,
      "step": 100
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.0661650225520134,
      "eval_mean_token_accuracy": 0.9811175402174604,
      "eval_num_tokens": 3381248.0,
      "eval_runtime": 5.1057,
      "eval_samples_per_second": 72.273,
      "eval_steps_per_second": 9.205,
      "step": 104
    },
    {
      "epoch": 1.202409638554217,
      "grad_norm": 0.07078867405653,
      "learning_rate": 0.0004851308021510392,
      "loss": 0.0498,
      "mean_token_accuracy": 0.9861284048870357,
      "num_tokens": 4069376.0,
      "step": 125
    },
    {
      "epoch": 1.4433734939759035,
      "grad_norm": 0.0640600249171257,
      "learning_rate": 0.00047127450736389253,
      "loss": 0.0332,
      "mean_token_accuracy": 0.9904011791944504,
      "num_tokens": 4888576.0,
      "step": 150
    },
    {
      "epoch": 1.6843373493975904,
      "grad_norm": 0.04669954255223274,
      "learning_rate": 0.0004547506012299107,
      "loss": 0.023,
      "mean_token_accuracy": 0.9934894973039627,
      "num_tokens": 5707776.0,
      "step": 175
    },
    {
      "epoch": 1.9253012048192772,
      "grad_norm": 0.05227402225136757,
      "learning_rate": 0.00043576336812001216,
      "loss": 0.0192,
      "mean_token_accuracy": 0.9945132231712341,
      "num_tokens": 6526976.0,
      "step": 200
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.019761234521865845,
      "eval_mean_token_accuracy": 0.9943946092686755,
      "eval_num_tokens": 6762496.0,
      "eval_runtime": 5.0985,
      "eval_samples_per_second": 72.374,
      "eval_steps_per_second": 9.218,
      "step": 208
    },
    {
      "epoch": 2.163855421686747,
      "grad_norm": 0.0476720966398716,
      "learning_rate": 0.0004145475464150544,
      "loss": 0.0182,
      "mean_token_accuracy": 0.9945257510801758,
      "num_tokens": 7319552.0,
      "step": 225
    },
    {
      "epoch": 2.404819277108434,
      "grad_norm": 0.02922731079161167,
      "learning_rate": 0.00039136542644490095,
      "loss": 0.0157,
      "mean_token_accuracy": 0.9952898854017258,
      "num_tokens": 8138752.0,
      "step": 250
    },
    {
      "epoch": 2.6457831325301204,
      "grad_norm": 0.02723618969321251,
      "learning_rate": 0.00036650360780387636,
      "loss": 0.0142,
      "mean_token_accuracy": 0.9957937997579575,
      "num_tokens": 8957952.0,
      "step": 275
    },
    {
      "epoch": 2.886746987951807,
      "grad_norm": 0.02853013575077057,
      "learning_rate": 0.0003402694561317866,
      "loss": 0.0126,
      "mean_token_accuracy": 0.9962744742631913,
      "num_tokens": 9777152.0,
      "step": 300
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.01109593641012907,
      "eval_mean_token_accuracy": 0.996658629559456,
      "eval_num_tokens": 10143744.0,
      "eval_runtime": 5.1053,
      "eval_samples_per_second": 72.278,
      "eval_steps_per_second": 9.206,
      "step": 312
    },
    {
      "epoch": 3.125301204819277,
      "grad_norm": 0.026453198865056038,
      "learning_rate": 0.00031298730316517496,
      "loss": 0.0117,
      "mean_token_accuracy": 0.996507405632674,
      "num_tokens": 10569728.0,
      "step": 325
    },
    {
      "epoch": 3.3662650602409636,
      "grad_norm": 0.012349753640592098,
      "learning_rate": 0.00028499443703742166,
      "loss": 0.0106,
      "mean_token_accuracy": 0.996728241443634,
      "num_tokens": 11388928.0,
      "step": 350
    },
    {
      "epoch": 3.6072289156626507,
      "grad_norm": 0.010343802161514759,
      "learning_rate": 0.0002566369323994442,
      "loss": 0.011,
      "mean_token_accuracy": 0.9966022622585297,
      "num_tokens": 12208128.0,
      "step": 375
    },
    {
      "epoch": 3.8481927710843373,
      "grad_norm": 0.01229863241314888,
      "learning_rate": 0.0002282653719130422,
      "loss": 0.01,
      "mean_token_accuracy": 0.9968224185705185,
      "num_tokens": 13027328.0,
      "step": 400
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.01064043864607811,
      "eval_mean_token_accuracy": 0.9967445059025541,
      "eval_num_tokens": 13524992.0,
      "eval_runtime": 5.0984,
      "eval_samples_per_second": 72.376,
      "eval_steps_per_second": 9.219,
      "step": 416
    },
    {
      "epoch": 4.086746987951807,
      "grad_norm": 0.009697753936052322,
      "learning_rate": 0.0002002305120118911,
      "loss": 0.0098,
      "mean_token_accuracy": 0.9968422109430487,
      "num_tokens": 13819904.0,
      "step": 425
    },
    {
      "epoch": 4.327710843373494,
      "grad_norm": 0.009794726967811584,
      "learning_rate": 0.0001728789465141988,
      "loss": 0.0096,
      "mean_token_accuracy": 0.996862781047821,
      "num_tokens": 14639104.0,
      "step": 450
    },
    {
      "epoch": 4.5686746987951805,
      "grad_norm": 0.013056655414402485,
      "learning_rate": 0.00014654882169760835,
      "loss": 0.0094,
      "mean_token_accuracy": 0.9969141507148742,
      "num_tokens": 15458304.0,
      "step": 475
    },
    {
      "epoch": 4.809638554216868,
      "grad_norm": 0.00956389307975769,
      "learning_rate": 0.00012156565581069528,
      "loss": 0.0094,
      "mean_token_accuracy": 0.9968933582305908,
      "num_tokens": 16277504.0,
      "step": 500
    },
    {
      "epoch": 5.0,
      "eval_loss": 0.009822968393564224,
      "eval_mean_token_accuracy": 0.9968616100067788,
      "eval_num_tokens": 16906240.0,
      "eval_runtime": 5.1012,
      "eval_samples_per_second": 72.335,
      "eval_steps_per_second": 9.213,
      "step": 520
    }
  ],
  "logging_steps": 25,
  "max_steps": 728,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 7,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.3760476200593e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}