{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.3319126265316994,
  "eval_steps": 500,
  "global_step": 2500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2663825253063399,
      "grad_norm": 4.165382385253906,
      "learning_rate": 1.822766826496182e-05,
      "loss": 2.1046,
      "mean_token_accuracy": 0.5178786942958832,
      "num_tokens": 6104570.0,
      "step": 500
    },
    {
      "epoch": 0.5327650506126798,
      "grad_norm": 4.388947010040283,
      "learning_rate": 1.6451784762919554e-05,
      "loss": 1.9567,
      "mean_token_accuracy": 0.5437576373815537,
      "num_tokens": 12233138.0,
      "step": 1000
    },
    {
      "epoch": 0.7991475759190197,
      "grad_norm": 4.746854782104492,
      "learning_rate": 1.4675901260877288e-05,
      "loss": 1.7979,
      "mean_token_accuracy": 0.5719504290819168,
      "num_tokens": 18348342.0,
      "step": 1500
    },
    {
      "epoch": 1.0655301012253595,
      "grad_norm": 5.537892818450928,
      "learning_rate": 1.2900017758835022e-05,
      "loss": 1.4819,
      "mean_token_accuracy": 0.6352935122847557,
      "num_tokens": 24492082.0,
      "step": 2000
    },
    {
      "epoch": 1.3319126265316994,
      "grad_norm": 5.861352443695068,
      "learning_rate": 1.1124134256792756e-05,
      "loss": 0.8953,
      "mean_token_accuracy": 0.7642805895209313,
      "num_tokens": 30611797.0,
      "step": 2500
    }
  ],
  "logging_steps": 500,
  "max_steps": 5631,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.904964430207386e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}
|
|