|
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.046207497820401,
  "eval_steps": 500,
  "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.08718395815170009,
      "grad_norm": 1260.8033447265625,
      "learning_rate": 8.333333333333333e-07,
      "loss": 14.8341,
      "step": 100
    },
    {
      "epoch": 0.17436791630340018,
      "grad_norm": 745.888427734375,
      "learning_rate": 9.259259259259259e-07,
      "loss": 15.6171,
      "step": 200
    },
    {
      "epoch": 0.26155187445510025,
      "grad_norm": 840.9533081054688,
      "learning_rate": 8.333333333333333e-07,
      "loss": 15.8498,
      "step": 300
    },
    {
      "epoch": 0.34873583260680036,
      "grad_norm": 373.6918029785156,
      "learning_rate": 7.407407407407406e-07,
      "loss": 16.6859,
      "step": 400
    },
    {
      "epoch": 0.43591979075850046,
      "grad_norm": 580.3277587890625,
      "learning_rate": 6.481481481481481e-07,
      "loss": 14.4898,
      "step": 500
    },
    {
      "epoch": 0.5231037489102005,
      "grad_norm": 77.99569702148438,
      "learning_rate": 5.555555555555555e-07,
      "loss": 15.7702,
      "step": 600
    },
    {
      "epoch": 0.6102877070619006,
      "grad_norm": 147.39584350585938,
      "learning_rate": 4.6296296296296297e-07,
      "loss": 15.0814,
      "step": 700
    },
    {
      "epoch": 0.6974716652136007,
      "grad_norm": 141.38795471191406,
      "learning_rate": 3.703703703703703e-07,
      "loss": 12.6944,
      "step": 800
    },
    {
      "epoch": 0.7846556233653008,
      "grad_norm": 947.9025268554688,
      "learning_rate": 2.7777777777777776e-07,
      "loss": 12.4095,
      "step": 900
    },
    {
      "epoch": 0.8718395815170009,
      "grad_norm": 862.9531860351562,
      "learning_rate": 1.8518518518518516e-07,
      "loss": 12.4597,
      "step": 1000
    },
    {
      "epoch": 0.9590235396687009,
      "grad_norm": 247.12240600585938,
      "learning_rate": 9.259259259259258e-08,
      "loss": 11.3951,
      "step": 1100
    },
    {
      "epoch": 1.0,
      "eval_loss": 476.5635681152344,
      "eval_runtime": 22.3653,
      "eval_samples_per_second": 45.606,
      "eval_steps_per_second": 5.723,
      "step": 1147
    },
    {
      "epoch": 1.046207497820401,
      "grad_norm": 573.4383544921875,
      "learning_rate": 0.0,
      "loss": 12.2434,
      "step": 1200
    }
  ],
  "logging_steps": 100,
  "max_steps": 1200,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 600,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|