{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 395,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.06329113924050633,
"grad_norm": 8.190221366047021,
"learning_rate": 4.000000000000001e-06,
"loss": 1.6528,
"step": 5
},
{
"epoch": 0.12658227848101267,
"grad_norm": 2.0302478881380908,
"learning_rate": 9e-06,
"loss": 1.433,
"step": 10
},
{
"epoch": 0.189873417721519,
"grad_norm": 0.9035030619527921,
"learning_rate": 1.4e-05,
"loss": 1.2997,
"step": 15
},
{
"epoch": 0.25316455696202533,
"grad_norm": 0.6411978704213375,
"learning_rate": 1.9e-05,
"loss": 1.2252,
"step": 20
},
{
"epoch": 0.31645569620253167,
"grad_norm": 0.4729734480029101,
"learning_rate": 2.4e-05,
"loss": 1.1621,
"step": 25
},
{
"epoch": 0.379746835443038,
"grad_norm": 0.39623906335790865,
"learning_rate": 2.9e-05,
"loss": 1.1294,
"step": 30
},
{
"epoch": 0.4430379746835443,
"grad_norm": 0.35838538383752355,
"learning_rate": 3.4e-05,
"loss": 1.1227,
"step": 35
},
{
"epoch": 0.5063291139240507,
"grad_norm": 0.3972117468174211,
"learning_rate": 3.9e-05,
"loss": 1.0958,
"step": 40
},
{
"epoch": 0.569620253164557,
"grad_norm": 0.38312860456168585,
"learning_rate": 3.998747096355221e-05,
"loss": 1.0891,
"step": 45
},
{
"epoch": 0.6329113924050633,
"grad_norm": 0.36710577735917843,
"learning_rate": 3.993659865551998e-05,
"loss": 1.0626,
"step": 50
},
{
"epoch": 0.6962025316455697,
"grad_norm": 0.3349682144567762,
"learning_rate": 3.984669951939583e-05,
"loss": 1.0429,
"step": 55
},
{
"epoch": 0.759493670886076,
"grad_norm": 0.3620969332045494,
"learning_rate": 3.971794953696041e-05,
"loss": 1.0611,
"step": 60
},
{
"epoch": 0.8227848101265823,
"grad_norm": 0.33980748949320766,
"learning_rate": 3.955060074235045e-05,
"loss": 1.0622,
"step": 65
},
{
"epoch": 0.8860759493670886,
"grad_norm": 0.35220506114218925,
"learning_rate": 3.934498072869008e-05,
"loss": 1.0448,
"step": 70
},
{
"epoch": 0.9493670886075949,
"grad_norm": 0.3345475666867474,
"learning_rate": 3.910149200681199e-05,
"loss": 1.0382,
"step": 75
},
{
"epoch": 1.0126582278481013,
"grad_norm": 0.5835942213568852,
"learning_rate": 3.882061121732349e-05,
"loss": 1.0114,
"step": 80
},
{
"epoch": 1.0759493670886076,
"grad_norm": 0.490279488075702,
"learning_rate": 3.850288819756019e-05,
"loss": 0.8861,
"step": 85
},
{
"epoch": 1.139240506329114,
"grad_norm": 0.36798975076726864,
"learning_rate": 3.814894490525356e-05,
"loss": 0.929,
"step": 90
},
{
"epoch": 1.2025316455696202,
"grad_norm": 0.4166137764276443,
"learning_rate": 3.775947420101948e-05,
"loss": 0.888,
"step": 95
},
{
"epoch": 1.2658227848101267,
"grad_norm": 0.36773960752890966,
"learning_rate": 3.733523849205105e-05,
"loss": 0.8759,
"step": 100
},
{
"epoch": 1.3291139240506329,
"grad_norm": 0.31732076539896936,
"learning_rate": 3.687706823967073e-05,
"loss": 0.9184,
"step": 105
},
{
"epoch": 1.3924050632911391,
"grad_norm": 0.35595264578026214,
"learning_rate": 3.6385860333663236e-05,
"loss": 0.8784,
"step": 110
},
{
"epoch": 1.4556962025316456,
"grad_norm": 0.33660269368665974,
"learning_rate": 3.5862576336571725e-05,
"loss": 0.8814,
"step": 115
},
{
"epoch": 1.518987341772152,
"grad_norm": 0.38557057544668505,
"learning_rate": 3.530824060139396e-05,
"loss": 0.9016,
"step": 120
},
{
"epoch": 1.5822784810126582,
"grad_norm": 0.33702770153593836,
"learning_rate": 3.472393826636317e-05,
"loss": 0.9145,
"step": 125
},
{
"epoch": 1.6455696202531644,
"grad_norm": 0.3451811525905654,
"learning_rate": 3.411081313073906e-05,
"loss": 0.8916,
"step": 130
},
{
"epoch": 1.7088607594936709,
"grad_norm": 0.35839003670208736,
"learning_rate": 3.3470065415767004e-05,
"loss": 0.8931,
"step": 135
},
{
"epoch": 1.7721518987341773,
"grad_norm": 0.35474577846302596,
"learning_rate": 3.28029494151886e-05,
"loss": 0.8628,
"step": 140
},
{
"epoch": 1.8354430379746836,
"grad_norm": 0.33305048478780597,
"learning_rate": 3.211077103990278e-05,
"loss": 0.8668,
"step": 145
},
{
"epoch": 1.8987341772151898,
"grad_norm": 0.3534851157371203,
"learning_rate": 3.13948852615839e-05,
"loss": 0.8731,
"step": 150
},
{
"epoch": 1.9620253164556962,
"grad_norm": 0.3314251565751023,
"learning_rate": 3.065669346026106e-05,
"loss": 0.8877,
"step": 155
},
{
"epoch": 2.0253164556962027,
"grad_norm": 0.528797556904215,
"learning_rate": 2.9897640681050877e-05,
"loss": 0.8092,
"step": 160
},
{
"epoch": 2.088607594936709,
"grad_norm": 0.44442500024474996,
"learning_rate": 2.91192128054138e-05,
"loss": 0.7009,
"step": 165
},
{
"epoch": 2.151898734177215,
"grad_norm": 0.416444200678735,
"learning_rate": 2.832293364247141e-05,
"loss": 0.7011,
"step": 170
},
{
"epoch": 2.2151898734177213,
"grad_norm": 0.42969912298795,
"learning_rate": 2.7510361946078482e-05,
"loss": 0.7099,
"step": 175
},
{
"epoch": 2.278481012658228,
"grad_norm": 0.3916144378079092,
"learning_rate": 2.6683088363489118e-05,
"loss": 0.6843,
"step": 180
},
{
"epoch": 2.3417721518987342,
"grad_norm": 0.3513696446056463,
"learning_rate": 2.5842732321590034e-05,
"loss": 0.6784,
"step": 185
},
{
"epoch": 2.4050632911392404,
"grad_norm": 0.37722195477379744,
"learning_rate": 2.499093885679642e-05,
"loss": 0.6919,
"step": 190
},
{
"epoch": 2.4683544303797467,
"grad_norm": 0.3693875329737844,
"learning_rate": 2.4129375394815878e-05,
"loss": 0.6813,
"step": 195
},
{
"epoch": 2.5316455696202533,
"grad_norm": 0.36550476454804565,
"learning_rate": 2.3259728486584297e-05,
"loss": 0.7073,
"step": 200
},
{
"epoch": 2.5949367088607596,
"grad_norm": 0.36673126123218974,
"learning_rate": 2.2383700506763204e-05,
"loss": 0.6961,
"step": 205
},
{
"epoch": 2.6582278481012658,
"grad_norm": 0.3574666079741072,
"learning_rate": 2.150300632126142e-05,
"loss": 0.6864,
"step": 210
},
{
"epoch": 2.721518987341772,
"grad_norm": 0.37617726769282556,
"learning_rate": 2.061936993030451e-05,
"loss": 0.6952,
"step": 215
},
{
"epoch": 2.7848101265822782,
"grad_norm": 0.3656388281852626,
"learning_rate": 1.9734521093623388e-05,
"loss": 0.6829,
"step": 220
},
{
"epoch": 2.848101265822785,
"grad_norm": 0.33883760060351703,
"learning_rate": 1.88501919443684e-05,
"loss": 0.6969,
"step": 225
},
{
"epoch": 2.911392405063291,
"grad_norm": 0.35981527876374314,
"learning_rate": 1.7968113598377356e-05,
"loss": 0.6984,
"step": 230
},
{
"epoch": 2.9746835443037973,
"grad_norm": 0.3670812501059382,
"learning_rate": 1.7090012765434974e-05,
"loss": 0.69,
"step": 235
},
{
"epoch": 3.037974683544304,
"grad_norm": 0.49917176315512335,
"learning_rate": 1.6217608369157417e-05,
"loss": 0.6373,
"step": 240
},
{
"epoch": 3.1012658227848102,
"grad_norm": 0.5918566188787662,
"learning_rate": 1.5352608182118546e-05,
"loss": 0.539,
"step": 245
},
{
"epoch": 3.1645569620253164,
"grad_norm": 0.5041512334832167,
"learning_rate": 1.4496705482804943e-05,
"loss": 0.5206,
"step": 250
},
{
"epoch": 3.2278481012658227,
"grad_norm": 0.45653261940890033,
"learning_rate": 1.3651575740943746e-05,
"loss": 0.5281,
"step": 255
},
{
"epoch": 3.291139240506329,
"grad_norm": 0.43851629336794623,
"learning_rate": 1.2818873337691993e-05,
"loss": 0.5172,
"step": 260
},
{
"epoch": 3.3544303797468356,
"grad_norm": 0.40824593110817303,
"learning_rate": 1.2000228327107787e-05,
"loss": 0.5416,
"step": 265
},
{
"epoch": 3.4177215189873418,
"grad_norm": 0.4125095093522506,
"learning_rate": 1.1197243245242978e-05,
"loss": 0.5105,
"step": 270
},
{
"epoch": 3.481012658227848,
"grad_norm": 0.39546638971159587,
"learning_rate": 1.0411489973103525e-05,
"loss": 0.526,
"step": 275
},
{
"epoch": 3.5443037974683547,
"grad_norm": 0.3979523063934119,
"learning_rate": 9.64450665961866e-06,
"loss": 0.5052,
"step": 280
},
{
"epoch": 3.607594936708861,
"grad_norm": 0.4007566972264219,
"learning_rate": 8.897794710642098e-06,
"loss": 0.5141,
"step": 285
},
{
"epoch": 3.670886075949367,
"grad_norm": 0.39854536610455366,
"learning_rate": 8.172815849879607e-06,
"loss": 0.5337,
"step": 290
},
{
"epoch": 3.7341772151898733,
"grad_norm": 0.38032901846122297,
"learning_rate": 7.470989257496164e-06,
"loss": 0.5187,
"step": 295
},
{
"epoch": 3.7974683544303796,
"grad_norm": 0.37263622065605867,
"learning_rate": 6.7936887920041825e-06,
"loss": 0.5111,
"step": 300
},
{
"epoch": 3.8607594936708862,
"grad_norm": 0.39103621070748484,
"learning_rate": 6.1422403008709255e-06,
"loss": 0.5328,
"step": 305
},
{
"epoch": 3.9240506329113924,
"grad_norm": 0.3820249737884236,
"learning_rate": 5.517919025109839e-06,
"loss": 0.51,
"step": 310
},
{
"epoch": 3.9873417721518987,
"grad_norm": 0.3819442127092411,
"learning_rate": 4.921947102936388e-06,
"loss": 0.5098,
"step": 315
},
{
"epoch": 4.050632911392405,
"grad_norm": 0.45292407093983916,
"learning_rate": 4.3554911773751e-06,
"loss": 0.4576,
"step": 320
},
{
"epoch": 4.113924050632911,
"grad_norm": 0.6815364711164434,
"learning_rate": 3.819660112501053e-06,
"loss": 0.4179,
"step": 325
},
{
"epoch": 4.177215189873418,
"grad_norm": 0.40656558419724,
"learning_rate": 3.315502822786407e-06,
"loss": 0.3929,
"step": 330
},
{
"epoch": 4.2405063291139244,
"grad_norm": 0.44333659988830937,
"learning_rate": 2.8440062198010187e-06,
"loss": 0.4279,
"step": 335
},
{
"epoch": 4.30379746835443,
"grad_norm": 0.3979679289743368,
"learning_rate": 2.4060932802867498e-06,
"loss": 0.4249,
"step": 340
},
{
"epoch": 4.367088607594937,
"grad_norm": 0.3910383908124771,
"learning_rate": 2.0026212393871057e-06,
"loss": 0.4119,
"step": 345
},
{
"epoch": 4.430379746835443,
"grad_norm": 0.41359872762990135,
"learning_rate": 1.6343799125692194e-06,
"loss": 0.4302,
"step": 350
},
{
"epoch": 4.493670886075949,
"grad_norm": 0.36837247694605507,
"learning_rate": 1.3020901495229632e-06,
"loss": 0.4067,
"step": 355
},
{
"epoch": 4.556962025316456,
"grad_norm": 0.37212695119767214,
"learning_rate": 1.0064024230638547e-06,
"loss": 0.4346,
"step": 360
},
{
"epoch": 4.620253164556962,
"grad_norm": 0.3829623334903917,
"learning_rate": 7.478955558019408e-07,
"loss": 0.412,
"step": 365
},
{
"epoch": 4.6835443037974684,
"grad_norm": 0.38398801975165414,
"learning_rate": 5.270755870693877e-07,
"loss": 0.4171,
"step": 370
},
{
"epoch": 4.746835443037975,
"grad_norm": 0.3902403567855495,
"learning_rate": 3.4437478232470123e-07,
"loss": 0.4082,
"step": 375
},
{
"epoch": 4.810126582278481,
"grad_norm": 0.38211563038112484,
"learning_rate": 2.0015078697281477e-07,
"loss": 0.4122,
"step": 380
},
{
"epoch": 4.8734177215189876,
"grad_norm": 0.387782528773728,
"learning_rate": 9.46859262573896e-08,
"loss": 0.4003,
"step": 385
},
{
"epoch": 4.936708860759493,
"grad_norm": 0.41433765454771804,
"learning_rate": 2.8186652595918464e-08,
"loss": 0.4297,
"step": 390
},
{
"epoch": 5.0,
"grad_norm": 0.37399343528203305,
"learning_rate": 7.831414393999481e-10,
"loss": 0.4124,
"step": 395
},
{
"epoch": 5.0,
"step": 395,
"total_flos": 717096447639552.0,
"train_loss": 0.7372651619247244,
"train_runtime": 37731.7684,
"train_samples_per_second": 1.34,
"train_steps_per_second": 0.01
}
],
"logging_steps": 5,
"max_steps": 395,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": false,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 717096447639552.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}