{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.6091370558375635,
  "eval_steps": 500,
  "global_step": 3000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.050761421319796954,
      "grad_norm": 0.7555294632911682,
      "learning_rate": 9.831472081218275e-05,
      "loss": 3.185,
      "step": 250
    },
    {
      "epoch": 0.10152284263959391,
      "grad_norm": 0.7812637090682983,
      "learning_rate": 9.662267343485618e-05,
      "loss": 3.1833,
      "step": 500
    },
    {
      "epoch": 0.10152284263959391,
      "eval_loss": 2.7107906341552734,
      "eval_runtime": 88.982,
      "eval_samples_per_second": 110.697,
      "eval_steps_per_second": 3.697,
      "step": 500
    },
    {
      "epoch": 0.15228426395939088,
      "grad_norm": 0.7523223161697388,
      "learning_rate": 9.493062605752962e-05,
      "loss": 3.1846,
      "step": 750
    },
    {
      "epoch": 0.20304568527918782,
      "grad_norm": 0.820755124092102,
      "learning_rate": 9.323857868020304e-05,
      "loss": 3.1779,
      "step": 1000
    },
    {
      "epoch": 0.20304568527918782,
      "eval_loss": 2.701174020767212,
      "eval_runtime": 88.9362,
      "eval_samples_per_second": 110.754,
      "eval_steps_per_second": 3.699,
      "step": 1000
    },
    {
      "epoch": 0.25380710659898476,
      "grad_norm": 0.7443365454673767,
      "learning_rate": 9.154653130287648e-05,
      "loss": 3.1669,
      "step": 1250
    },
    {
      "epoch": 0.30456852791878175,
      "grad_norm": 0.7510855197906494,
      "learning_rate": 8.985448392554991e-05,
      "loss": 3.163,
      "step": 1500
    },
    {
      "epoch": 0.30456852791878175,
      "eval_loss": 2.6923437118530273,
      "eval_runtime": 88.8708,
      "eval_samples_per_second": 110.835,
      "eval_steps_per_second": 3.702,
      "step": 1500
    },
    {
      "epoch": 0.3553299492385787,
      "grad_norm": 0.7907871007919312,
      "learning_rate": 8.816243654822337e-05,
      "loss": 3.1668,
      "step": 1750
    },
    {
      "epoch": 0.40609137055837563,
      "grad_norm": 0.8167365193367004,
      "learning_rate": 8.647038917089679e-05,
      "loss": 3.1533,
      "step": 2000
    },
    {
      "epoch": 0.40609137055837563,
      "eval_loss": 2.68522310256958,
      "eval_runtime": 88.8442,
      "eval_samples_per_second": 110.868,
      "eval_steps_per_second": 3.703,
      "step": 2000
    },
    {
      "epoch": 0.45685279187817257,
      "grad_norm": 0.7124233841896057,
      "learning_rate": 8.477834179357022e-05,
      "loss": 3.157,
      "step": 2250
    },
    {
      "epoch": 0.5076142131979695,
      "grad_norm": 0.7829596400260925,
      "learning_rate": 8.308629441624366e-05,
      "loss": 3.1582,
      "step": 2500
    },
    {
      "epoch": 0.5076142131979695,
      "eval_loss": 2.681734561920166,
      "eval_runtime": 88.8737,
      "eval_samples_per_second": 110.831,
      "eval_steps_per_second": 3.702,
      "step": 2500
    },
    {
      "epoch": 0.5583756345177665,
      "grad_norm": 0.7784757018089294,
      "learning_rate": 8.139424703891709e-05,
      "loss": 3.1581,
      "step": 2750
    },
    {
      "epoch": 0.6091370558375635,
      "grad_norm": 0.7557055354118347,
      "learning_rate": 7.970219966159053e-05,
      "loss": 3.157,
      "step": 3000
    },
    {
      "epoch": 0.6091370558375635,
      "eval_loss": 2.6753175258636475,
      "eval_runtime": 89.0244,
      "eval_samples_per_second": 110.644,
      "eval_steps_per_second": 3.696,
      "step": 3000
    }
  ],
  "logging_steps": 250,
  "max_steps": 14775,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2030929182720000.0,
  "train_batch_size": 30,
  "trial_name": null,
  "trial_params": null
}