{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.6005378409527469,
  "eval_steps": 600,
  "global_step": 10416,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00768344218209758,
      "grad_norm": 24.238063812163187,
      "learning_rate": 1.881720430107527e-07,
      "loss": 0.3364,
      "step": 50
    },
    {
      "epoch": 0.01536688436419516,
      "grad_norm": 6.319806837540333,
      "learning_rate": 3.801843317972351e-07,
      "loss": 0.2362,
      "step": 100
    },
    {
      "epoch": 0.02305032654629274,
      "grad_norm": 5.317146309656271,
      "learning_rate": 5.721966205837174e-07,
      "loss": 0.1782,
      "step": 150
    },
    {
      "epoch": 0.03073376872839032,
      "grad_norm": 6.141914479493457,
      "learning_rate": 7.642089093701997e-07,
      "loss": 0.1538,
      "step": 200
    },
    {
      "epoch": 0.0384172109104879,
      "grad_norm": 6.3393321544201955,
      "learning_rate": 9.562211981566821e-07,
      "loss": 0.1399,
      "step": 250
    },
    {
      "epoch": 0.04610065309258548,
      "grad_norm": 7.1957549897032775,
      "learning_rate": 1.1482334869431644e-06,
      "loss": 0.1279,
      "step": 300
    },
    {
      "epoch": 0.05378409527468306,
      "grad_norm": 4.981889336531056,
      "learning_rate": 1.340245775729647e-06,
      "loss": 0.1238,
      "step": 350
    },
    {
      "epoch": 0.06146753745678064,
      "grad_norm": 5.469128876608933,
      "learning_rate": 1.5322580645161292e-06,
      "loss": 0.1181,
      "step": 400
    },
    {
      "epoch": 0.06915097963887822,
      "grad_norm": 4.228572813571115,
      "learning_rate": 1.7242703533026115e-06,
      "loss": 0.1129,
      "step": 450
    },
    {
      "epoch": 0.0768344218209758,
      "grad_norm": 4.635063301103817,
      "learning_rate": 1.916282642089094e-06,
      "loss": 0.1085,
      "step": 500
    },
    {
      "epoch": 0.08451786400307337,
      "grad_norm": 4.57628839065234,
      "learning_rate": 2.108294930875576e-06,
      "loss": 0.1012,
      "step": 550
    },
    {
      "epoch": 0.09220130618517096,
      "grad_norm": 4.631658068697841,
      "learning_rate": 2.3003072196620586e-06,
      "loss": 0.1016,
      "step": 600
    },
    {
      "epoch": 0.09220130618517096,
      "eval_loss": 0.10174024850130081,
      "eval_runtime": 18.0705,
      "eval_samples_per_second": 203.702,
      "eval_steps_per_second": 6.419,
      "step": 600
    },
    {
      "epoch": 0.09988474836726853,
      "grad_norm": 4.666177854862017,
      "learning_rate": 2.492319508448541e-06,
      "loss": 0.0966,
      "step": 650
    },
    {
      "epoch": 0.10756819054936612,
      "grad_norm": 3.667615984584937,
      "learning_rate": 2.684331797235023e-06,
      "loss": 0.0999,
      "step": 700
    },
    {
      "epoch": 0.1152516327314637,
      "grad_norm": 3.2135487366487427,
      "learning_rate": 2.8763440860215057e-06,
      "loss": 0.0969,
      "step": 750
    },
    {
      "epoch": 0.12293507491356127,
      "grad_norm": 3.5733005027516476,
      "learning_rate": 3.068356374807988e-06,
      "loss": 0.0934,
      "step": 800
    },
    {
      "epoch": 0.13061851709565886,
      "grad_norm": 4.3597956101093605,
      "learning_rate": 3.2603686635944703e-06,
      "loss": 0.0931,
      "step": 850
    },
    {
      "epoch": 0.13830195927775643,
      "grad_norm": 3.2540960430191364,
      "learning_rate": 3.4523809523809528e-06,
      "loss": 0.0935,
      "step": 900
    },
    {
      "epoch": 0.145985401459854,
      "grad_norm": 4.391490641674597,
      "learning_rate": 3.644393241167435e-06,
      "loss": 0.1001,
      "step": 950
    },
    {
      "epoch": 0.1536688436419516,
      "grad_norm": 4.80552168899774,
      "learning_rate": 3.836405529953917e-06,
      "loss": 0.0942,
      "step": 1000
    },
    {
      "epoch": 0.16135228582404917,
      "grad_norm": 2.7917815686351064,
      "learning_rate": 4.0284178187404e-06,
      "loss": 0.0932,
      "step": 1050
    },
    {
      "epoch": 0.16903572800614675,
      "grad_norm": 3.8315415753727153,
      "learning_rate": 4.220430107526882e-06,
      "loss": 0.0866,
      "step": 1100
    },
    {
      "epoch": 0.17671917018824435,
      "grad_norm": 3.0820620211058003,
      "learning_rate": 4.4124423963133644e-06,
      "loss": 0.0899,
      "step": 1150
    },
    {
      "epoch": 0.18440261237034192,
      "grad_norm": 3.4482968111997243,
      "learning_rate": 4.604454685099847e-06,
      "loss": 0.0885,
      "step": 1200
    },
    {
      "epoch": 0.18440261237034192,
      "eval_loss": 0.09638471901416779,
      "eval_runtime": 17.8744,
      "eval_samples_per_second": 205.938,
      "eval_steps_per_second": 6.49,
      "step": 1200
    },
    {
      "epoch": 0.1920860545524395,
      "grad_norm": 4.096325077313065,
      "learning_rate": 4.796466973886329e-06,
      "loss": 0.0907,
      "step": 1250
    },
    {
      "epoch": 0.19976949673453706,
      "grad_norm": 5.171403712016945,
      "learning_rate": 4.988479262672811e-06,
      "loss": 0.0891,
      "step": 1300
    },
    {
      "epoch": 0.20745293891663466,
      "grad_norm": 2.599652616837112,
      "learning_rate": 4.999801395408516e-06,
      "loss": 0.0885,
      "step": 1350
    },
    {
      "epoch": 0.21513638109873223,
      "grad_norm": 2.593935050173008,
      "learning_rate": 4.9991541014222635e-06,
      "loss": 0.0846,
      "step": 1400
    },
    {
      "epoch": 0.2228198232808298,
      "grad_norm": 2.4852578892924675,
      "learning_rate": 4.998057425259791e-06,
      "loss": 0.0857,
      "step": 1450
    },
    {
      "epoch": 0.2305032654629274,
      "grad_norm": 3.1893069499093554,
      "learning_rate": 4.99651156411851e-06,
      "loss": 0.0852,
      "step": 1500
    },
    {
      "epoch": 0.23818670764502498,
      "grad_norm": 2.3528305412915476,
      "learning_rate": 4.99451679596545e-06,
      "loss": 0.0866,
      "step": 1550
    },
    {
      "epoch": 0.24587014982712255,
      "grad_norm": 2.6041772725181214,
      "learning_rate": 4.9920734794872795e-06,
      "loss": 0.0885,
      "step": 1600
    },
    {
      "epoch": 0.2535535920092201,
      "grad_norm": 2.890143221665928,
      "learning_rate": 4.989182054025802e-06,
      "loss": 0.0856,
      "step": 1650
    },
    {
      "epoch": 0.2612370341913177,
      "grad_norm": 3.0480203743032686,
      "learning_rate": 4.985843039498966e-06,
      "loss": 0.0873,
      "step": 1700
    },
    {
      "epoch": 0.26892047637341526,
      "grad_norm": 2.94553715320027,
      "learning_rate": 4.982057036307365e-06,
      "loss": 0.083,
      "step": 1750
    },
    {
      "epoch": 0.27660391855551286,
      "grad_norm": 2.8391900367379077,
      "learning_rate": 4.97782472522629e-06,
      "loss": 0.0851,
      "step": 1800
    },
    {
      "epoch": 0.27660391855551286,
      "eval_loss": 0.08491890132427216,
      "eval_runtime": 17.8473,
      "eval_samples_per_second": 206.249,
      "eval_steps_per_second": 6.5,
      "step": 1800
    },
    {
      "epoch": 0.28428736073761046,
      "grad_norm": 3.4124198348599273,
      "learning_rate": 4.973146867283307e-06,
      "loss": 0.0795,
      "step": 1850
    },
    {
      "epoch": 0.291970802919708,
      "grad_norm": 2.556801090484712,
      "learning_rate": 4.968024303621417e-06,
      "loss": 0.0821,
      "step": 1900
    },
    {
      "epoch": 0.2996542451018056,
      "grad_norm": 2.5418464816752753,
      "learning_rate": 4.9624579553478065e-06,
      "loss": 0.0842,
      "step": 1950
    },
    {
      "epoch": 0.3073376872839032,
      "grad_norm": 2.8344117271063496,
      "learning_rate": 4.956448823368225e-06,
      "loss": 0.0866,
      "step": 2000
    },
    {
      "epoch": 0.31502112946600075,
      "grad_norm": 2.5199127327344018,
      "learning_rate": 4.949997988206998e-06,
      "loss": 0.0838,
      "step": 2050
    },
    {
      "epoch": 0.32270457164809835,
      "grad_norm": 2.622495155727973,
      "learning_rate": 4.943106609812742e-06,
      "loss": 0.0813,
      "step": 2100
    },
    {
      "epoch": 0.33038801383019595,
      "grad_norm": 2.9840472337428823,
      "learning_rate": 4.9357759273497906e-06,
      "loss": 0.0815,
      "step": 2150
    },
    {
      "epoch": 0.3380714560122935,
      "grad_norm": 2.886419271879702,
      "learning_rate": 4.928007258975368e-06,
      "loss": 0.0833,
      "step": 2200
    },
    {
      "epoch": 0.3457548981943911,
      "grad_norm": 2.083232239480286,
      "learning_rate": 4.919802001602572e-06,
      "loss": 0.0794,
      "step": 2250
    },
    {
      "epoch": 0.3534383403764887,
      "grad_norm": 2.300324464493836,
      "learning_rate": 4.911161630649194e-06,
      "loss": 0.0779,
      "step": 2300
    },
    {
      "epoch": 0.36112178255858624,
      "grad_norm": 2.642556591382307,
      "learning_rate": 4.9020876997724055e-06,
      "loss": 0.0845,
      "step": 2350
    },
    {
      "epoch": 0.36880522474068383,
      "grad_norm": 2.8343151813972813,
      "learning_rate": 4.892581840589403e-06,
      "loss": 0.0755,
      "step": 2400
    },
    {
      "epoch": 0.36880522474068383,
      "eval_loss": 0.08218714594841003,
      "eval_runtime": 19.8012,
      "eval_samples_per_second": 185.898,
      "eval_steps_per_second": 5.858,
      "step": 2400
    },
    {
      "epoch": 0.3764886669227814,
      "grad_norm": 2.3530318777857326,
      "learning_rate": 4.882645762384014e-06,
      "loss": 0.078,
      "step": 2450
    },
    {
      "epoch": 0.384172109104879,
      "grad_norm": 2.896149397695564,
      "learning_rate": 4.872281251799343e-06,
      "loss": 0.077,
      "step": 2500
    },
    {
      "epoch": 0.3918555512869766,
      "grad_norm": 2.769499482502948,
      "learning_rate": 4.861490172516515e-06,
      "loss": 0.0822,
      "step": 2550
    },
    {
      "epoch": 0.3995389934690741,
      "grad_norm": 2.6614111820201334,
      "learning_rate": 4.850274464919552e-06,
      "loss": 0.0845,
      "step": 2600
    },
    {
      "epoch": 0.4072224356511717,
      "grad_norm": 2.6687830377318775,
      "learning_rate": 4.838636145746472e-06,
      "loss": 0.0828,
      "step": 2650
    },
    {
      "epoch": 0.4149058778332693,
      "grad_norm": 2.6222942749225124,
      "learning_rate": 4.8265773077266505e-06,
      "loss": 0.082,
      "step": 2700
    },
    {
      "epoch": 0.42258932001536686,
      "grad_norm": 2.857585092349033,
      "learning_rate": 4.814100119204515e-06,
      "loss": 0.0827,
      "step": 2750
    },
    {
      "epoch": 0.43027276219746446,
      "grad_norm": 3.0718710372112112,
      "learning_rate": 4.801206823749649e-06,
      "loss": 0.0764,
      "step": 2800
    },
    {
      "epoch": 0.43795620437956206,
      "grad_norm": 2.3991559160863427,
      "learning_rate": 4.787899739753372e-06,
      "loss": 0.0818,
      "step": 2850
    },
    {
      "epoch": 0.4456396465616596,
      "grad_norm": 2.0645648250964044,
      "learning_rate": 4.774181260011856e-06,
      "loss": 0.0725,
      "step": 2900
    },
    {
      "epoch": 0.4533230887437572,
      "grad_norm": 2.030851345419917,
      "learning_rate": 4.760053851295867e-06,
      "loss": 0.081,
      "step": 2950
    },
    {
      "epoch": 0.4610065309258548,
      "grad_norm": 2.8074770711893953,
      "learning_rate": 4.74552005390721e-06,
      "loss": 0.0726,
      "step": 3000
    },
    {
      "epoch": 0.4610065309258548,
      "eval_loss": 0.08013278245925903,
      "eval_runtime": 17.8924,
      "eval_samples_per_second": 205.729,
      "eval_steps_per_second": 6.483,
      "step": 3000
    },
    {
      "epoch": 0.46868997310795235,
      "grad_norm": 2.359703128276483,
      "learning_rate": 4.730582481221945e-06,
      "loss": 0.0812,
      "step": 3050
    },
    {
      "epoch": 0.47637341529004995,
      "grad_norm": 1.8778064498183962,
      "learning_rate": 4.715243819220467e-06,
      "loss": 0.0826,
      "step": 3100
    },
    {
      "epoch": 0.4840568574721475,
      "grad_norm": 1.9963395486102375,
      "learning_rate": 4.6995068260045296e-06,
      "loss": 0.0769,
      "step": 3150
    },
    {
      "epoch": 0.4917402996542451,
      "grad_norm": 2.5211485683446644,
      "learning_rate": 4.6833743313013016e-06,
      "loss": 0.0783,
      "step": 3200
    },
    {
      "epoch": 0.4994237418363427,
      "grad_norm": 1.742500631708806,
      "learning_rate": 4.666849235954542e-06,
      "loss": 0.0746,
      "step": 3250
    },
    {
      "epoch": 0.5071071840184402,
      "grad_norm": 2.349151557568936,
      "learning_rate": 4.649934511402988e-06,
      "loss": 0.0762,
      "step": 3300
    },
    {
      "epoch": 0.5147906262005378,
      "grad_norm": 2.6526774252985335,
      "learning_rate": 4.632633199146052e-06,
      "loss": 0.0803,
      "step": 3350
    },
    {
      "epoch": 0.5224740683826354,
      "grad_norm": 2.21236960042971,
      "learning_rate": 4.614948410196916e-06,
      "loss": 0.081,
      "step": 3400
    },
    {
      "epoch": 0.530157510564733,
      "grad_norm": 2.5197036225433234,
      "learning_rate": 4.59688332452313e-06,
      "loss": 0.078,
      "step": 3450
    },
    {
      "epoch": 0.5378409527468305,
      "grad_norm": 2.702928580540892,
      "learning_rate": 4.578441190474809e-06,
      "loss": 0.0775,
      "step": 3500
    },
    {
      "epoch": 0.5455243949289281,
      "grad_norm": 2.3720905733220365,
      "learning_rate": 4.559625324200536e-06,
      "loss": 0.0738,
      "step": 3550
    },
    {
      "epoch": 0.5532078371110257,
      "grad_norm": 2.922617471629335,
      "learning_rate": 4.540439109051073e-06,
      "loss": 0.0823,
      "step": 3600
    },
    {
      "epoch": 0.5532078371110257,
      "eval_loss": 0.07812748104333878,
      "eval_runtime": 17.9237,
      "eval_samples_per_second": 205.371,
      "eval_steps_per_second": 6.472,
      "step": 3600
    },
    {
      "epoch": 0.5608912792931233,
      "grad_norm": 2.7852795271037087,
      "learning_rate": 4.520885994970989e-06,
      "loss": 0.0743,
      "step": 3650
    },
    {
      "epoch": 0.5685747214752209,
      "grad_norm": 1.909529044630668,
      "learning_rate": 4.500969497878309e-06,
      "loss": 0.0746,
      "step": 3700
    },
    {
      "epoch": 0.5762581636573185,
      "grad_norm": 2.3252755227402746,
      "learning_rate": 4.480693199032311e-06,
      "loss": 0.0732,
      "step": 3750
    },
    {
      "epoch": 0.583941605839416,
      "grad_norm": 2.2587598776573223,
      "learning_rate": 4.460060744389557e-06,
      "loss": 0.0746,
      "step": 3800
    },
    {
      "epoch": 0.5916250480215136,
      "grad_norm": 2.316511266907094,
      "learning_rate": 4.4390758439483086e-06,
      "loss": 0.0762,
      "step": 3850
    },
    {
      "epoch": 0.5993084902036112,
      "grad_norm": 2.2442036907986904,
      "learning_rate": 4.417742271081412e-06,
      "loss": 0.0747,
      "step": 3900
    },
    {
      "epoch": 0.6069919323857088,
      "grad_norm": 1.6981947121024932,
      "learning_rate": 4.3960638618578e-06,
      "loss": 0.0705,
      "step": 3950
    },
    {
      "epoch": 0.6146753745678064,
      "grad_norm": 2.461814042189026,
      "learning_rate": 4.3740445143527065e-06,
      "loss": 0.0767,
      "step": 4000
    },
    {
      "epoch": 0.622358816749904,
      "grad_norm": 1.758351017204179,
      "learning_rate": 4.351688187946746e-06,
      "loss": 0.0736,
      "step": 4050
    },
    {
      "epoch": 0.6300422589320015,
      "grad_norm": 2.5571250675031107,
      "learning_rate": 4.328998902613962e-06,
      "loss": 0.0724,
      "step": 4100
    },
    {
      "epoch": 0.6377257011140991,
      "grad_norm": 2.349674273175025,
      "learning_rate": 4.3059807381989724e-06,
      "loss": 0.0739,
      "step": 4150
    },
    {
      "epoch": 0.6454091432961967,
      "grad_norm": 2.187200746633409,
      "learning_rate": 4.282637833683371e-06,
      "loss": 0.0728,
      "step": 4200
    },
    {
      "epoch": 0.6454091432961967,
      "eval_loss": 0.0765346810221672,
      "eval_runtime": 27.9428,
      "eval_samples_per_second": 131.733,
      "eval_steps_per_second": 4.151,
      "step": 4200
    },
    {
      "epoch": 0.6530925854782943,
      "grad_norm": 2.1984417750602483,
      "learning_rate": 4.258974386441469e-06,
      "loss": 0.0745,
      "step": 4250
    },
    {
      "epoch": 0.6607760276603919,
      "grad_norm": 1.9098478006043746,
      "learning_rate": 4.2349946514855585e-06,
      "loss": 0.073,
      "step": 4300
    },
    {
      "epoch": 0.6684594698424894,
      "grad_norm": 2.1688877445765624,
      "learning_rate": 4.210702940700798e-06,
      "loss": 0.0688,
      "step": 4350
    },
    {
      "epoch": 0.676142912024587,
      "grad_norm": 2.2260961466321536,
      "learning_rate": 4.186103622069879e-06,
      "loss": 0.0715,
      "step": 4400
    },
    {
      "epoch": 0.6838263542066846,
      "grad_norm": 1.979308479261721,
      "learning_rate": 4.161201118887599e-06,
      "loss": 0.071,
      "step": 4450
    },
    {
      "epoch": 0.6915097963887822,
      "grad_norm": 1.9600879521664334,
      "learning_rate": 4.135999908965499e-06,
      "loss": 0.0742,
      "step": 4500
    },
    {
      "epoch": 0.6991932385708798,
      "grad_norm": 2.4238054914257483,
      "learning_rate": 4.110504523826685e-06,
      "loss": 0.0715,
      "step": 4550
    },
    {
      "epoch": 0.7068766807529774,
      "grad_norm": 2.1089065381121017,
      "learning_rate": 4.0847195478910015e-06,
      "loss": 0.0734,
      "step": 4600
    },
    {
      "epoch": 0.7145601229350749,
      "grad_norm": 2.473769749167304,
      "learning_rate": 4.058649617650691e-06,
      "loss": 0.076,
      "step": 4650
    },
    {
      "epoch": 0.7222435651171725,
      "grad_norm": 2.13037508106318,
      "learning_rate": 4.0322994208366826e-06,
      "loss": 0.0679,
      "step": 4700
    },
    {
      "epoch": 0.7299270072992701,
      "grad_norm": 2.5081434982999986,
      "learning_rate": 4.005673695575684e-06,
      "loss": 0.0714,
      "step": 4750
    },
    {
      "epoch": 0.7376104494813677,
      "grad_norm": 2.523258547710937,
      "learning_rate": 3.978777229538191e-06,
      "loss": 0.0726,
      "step": 4800
    },
    {
      "epoch": 0.7376104494813677,
      "eval_loss": 0.07391650229692459,
      "eval_runtime": 17.867,
      "eval_samples_per_second": 206.022,
      "eval_steps_per_second": 6.492,
      "step": 4800
    },
    {
      "epoch": 0.7452938916634653,
      "grad_norm": 2.2265665380044086,
      "learning_rate": 3.951614859077608e-06,
      "loss": 0.0703,
      "step": 4850
    },
    {
      "epoch": 0.7529773338455628,
      "grad_norm": 2.381066328795494,
      "learning_rate": 3.924191468360597e-06,
      "loss": 0.0744,
      "step": 4900
    },
    {
      "epoch": 0.7606607760276604,
      "grad_norm": 2.8734977243388187,
      "learning_rate": 3.896511988488843e-06,
      "loss": 0.0705,
      "step": 4950
    },
    {
      "epoch": 0.768344218209758,
      "grad_norm": 2.7655237374601267,
      "learning_rate": 3.8685813966123705e-06,
      "loss": 0.0722,
      "step": 5000
    },
    {
      "epoch": 0.7760276603918556,
      "grad_norm": 2.0489765527392705,
      "learning_rate": 3.84040471503459e-06,
      "loss": 0.0714,
      "step": 5050
    },
    {
      "epoch": 0.7837111025739532,
      "grad_norm": 2.130498868865807,
      "learning_rate": 3.8119870103092117e-06,
      "loss": 0.0712,
      "step": 5100
    },
    {
      "epoch": 0.7913945447560508,
      "grad_norm": 2.156181891653775,
      "learning_rate": 3.783333392329212e-06,
      "loss": 0.0706,
      "step": 5150
    },
    {
      "epoch": 0.7990779869381482,
      "grad_norm": 1.9223235226472895,
      "learning_rate": 3.754449013408007e-06,
      "loss": 0.0697,
      "step": 5200
    },
    {
      "epoch": 0.8067614291202458,
      "grad_norm": 2.8205361640798023,
      "learning_rate": 3.7253390673529943e-06,
      "loss": 0.0694,
      "step": 5250
    },
    {
      "epoch": 0.8144448713023434,
      "grad_norm": 2.5866793948731925,
      "learning_rate": 3.6960087885316343e-06,
      "loss": 0.0754,
      "step": 5300
    },
    {
      "epoch": 0.822128313484441,
      "grad_norm": 1.6273019631219985,
      "learning_rate": 3.66646345093024e-06,
      "loss": 0.0746,
      "step": 5350
    },
    {
      "epoch": 0.8298117556665386,
      "grad_norm": 2.3776525917677334,
      "learning_rate": 3.6367083672056425e-06,
      "loss": 0.0687,
      "step": 5400
    },
    {
      "epoch": 0.8298117556665386,
      "eval_loss": 0.07365307211875916,
      "eval_runtime": 17.8652,
      "eval_samples_per_second": 206.043,
      "eval_steps_per_second": 6.493,
      "step": 5400
    },
    {
      "epoch": 0.8374951978486362,
      "grad_norm": 2.147338961225076,
      "learning_rate": 3.6067488877299015e-06,
      "loss": 0.0681,
      "step": 5450
    },
    {
      "epoch": 0.8451786400307337,
      "grad_norm": 1.953555336915205,
      "learning_rate": 3.576590399628237e-06,
      "loss": 0.0697,
      "step": 5500
    },
    {
      "epoch": 0.8528620822128313,
      "grad_norm": 2.657233757861753,
      "learning_rate": 3.5462383258103463e-06,
      "loss": 0.0692,
      "step": 5550
    },
    {
      "epoch": 0.8605455243949289,
      "grad_norm": 2.366202874241117,
      "learning_rate": 3.5156981239952948e-06,
      "loss": 0.0721,
      "step": 5600
    },
    {
      "epoch": 0.8682289665770265,
      "grad_norm": 2.0264884945952657,
      "learning_rate": 3.48497528573014e-06,
      "loss": 0.0727,
      "step": 5650
    },
    {
      "epoch": 0.8759124087591241,
      "grad_norm": 1.887823865598268,
      "learning_rate": 3.454075335402479e-06,
      "loss": 0.0713,
      "step": 5700
    },
    {
      "epoch": 0.8835958509412216,
      "grad_norm": 2.658475527954461,
      "learning_rate": 3.423003829247084e-06,
      "loss": 0.0663,
      "step": 5750
    },
    {
      "epoch": 0.8912792931233192,
      "grad_norm": 1.6406842789595826,
      "learning_rate": 3.3917663543468215e-06,
      "loss": 0.0706,
      "step": 5800
    },
    {
      "epoch": 0.8989627353054168,
      "grad_norm": 2.314778258305481,
      "learning_rate": 3.3603685276280096e-06,
      "loss": 0.0681,
      "step": 5850
    },
    {
      "epoch": 0.9066461774875144,
      "grad_norm": 2.190779353343635,
      "learning_rate": 3.3288159948504257e-06,
      "loss": 0.0683,
      "step": 5900
    },
    {
      "epoch": 0.914329619669612,
      "grad_norm": 1.6532618939938297,
      "learning_rate": 3.2971144295921153e-06,
      "loss": 0.0689,
      "step": 5950
    },
    {
      "epoch": 0.9220130618517096,
      "grad_norm": 2.1027280690511527,
      "learning_rate": 3.2652695322292087e-06,
      "loss": 0.0692,
      "step": 6000
    },
    {
      "epoch": 0.9220130618517096,
      "eval_loss": 0.07242421805858612,
      "eval_runtime": 21.9858,
      "eval_samples_per_second": 167.426,
      "eval_steps_per_second": 5.276,
      "step": 6000
    },
    {
      "epoch": 0.9296965040338071,
      "grad_norm": 2.7504375473082465,
      "learning_rate": 3.233287028910914e-06,
      "loss": 0.0689,
      "step": 6050
    },
    {
      "epoch": 0.9373799462159047,
      "grad_norm": 2.316245889035153,
      "learning_rate": 3.2011726705298773e-06,
      "loss": 0.07,
      "step": 6100
    },
    {
      "epoch": 0.9450633883980023,
      "grad_norm": 2.410296028767442,
      "learning_rate": 3.1689322316880923e-06,
      "loss": 0.0724,
      "step": 6150
    },
    {
      "epoch": 0.9527468305800999,
      "grad_norm": 2.4485063146319805,
      "learning_rate": 3.1365715096585474e-06,
      "loss": 0.0678,
      "step": 6200
    },
    {
      "epoch": 0.9604302727621975,
      "grad_norm": 2.0681296725028933,
      "learning_rate": 3.1040963233428005e-06,
      "loss": 0.0669,
      "step": 6250
    },
    {
      "epoch": 0.968113714944295,
      "grad_norm": 2.2182760796494447,
      "learning_rate": 3.071512512224654e-06,
      "loss": 0.0683,
      "step": 6300
    },
    {
      "epoch": 0.9757971571263926,
      "grad_norm": 2.016693520489793,
      "learning_rate": 3.038825935320143e-06,
      "loss": 0.0682,
      "step": 6350
    },
    {
      "epoch": 0.9834805993084902,
      "grad_norm": 2.41156057499178,
      "learning_rate": 3.0060424701239982e-06,
      "loss": 0.0725,
      "step": 6400
    },
    {
      "epoch": 0.9911640414905878,
      "grad_norm": 2.8306464381949406,
      "learning_rate": 2.9731680115527918e-06,
      "loss": 0.0685,
      "step": 6450
    },
    {
      "epoch": 0.9988474836726854,
      "grad_norm": 2.4401019923715297,
      "learning_rate": 2.9402084708849566e-06,
      "loss": 0.0676,
      "step": 6500
    },
    {
      "epoch": 1.006454091432962,
      "grad_norm": 1.9066741002602279,
      "learning_rate": 2.90716977469785e-06,
      "loss": 0.052,
      "step": 6550
    },
    {
      "epoch": 1.0141375336150595,
      "grad_norm": 1.5268013929083424,
      "learning_rate": 2.8740578638020787e-06,
      "loss": 0.0531,
      "step": 6600
    },
    {
      "epoch": 1.0141375336150595,
      "eval_loss": 0.07169247418642044,
      "eval_runtime": 17.9984,
      "eval_samples_per_second": 204.518,
      "eval_steps_per_second": 6.445,
      "step": 6600
    },
    {
      "epoch": 1.021820975797157,
      "grad_norm": 1.7532162092101438,
      "learning_rate": 2.8408786921732568e-06,
      "loss": 0.0505,
      "step": 6650
    },
    {
      "epoch": 1.0295044179792547,
      "grad_norm": 1.9866953976458739,
      "learning_rate": 2.8076382258814005e-06,
      "loss": 0.0515,
      "step": 6700
    },
    {
      "epoch": 1.0371878601613522,
      "grad_norm": 1.86036393886952,
      "learning_rate": 2.7743424420181492e-06,
      "loss": 0.0489,
      "step": 6750
    },
    {
      "epoch": 1.04487130234345,
      "grad_norm": 1.6710747515829403,
      "learning_rate": 2.740997327621997e-06,
      "loss": 0.0499,
      "step": 6800
    },
    {
      "epoch": 1.0525547445255474,
      "grad_norm": 2.0799273695134226,
      "learning_rate": 2.7076088786017457e-06,
      "loss": 0.0515,
      "step": 6850
    },
    {
      "epoch": 1.060238186707645,
      "grad_norm": 2.580889185536816,
      "learning_rate": 2.6741830986583573e-06,
      "loss": 0.0514,
      "step": 6900
    },
    {
      "epoch": 1.0679216288897426,
      "grad_norm": 1.7167394019439424,
      "learning_rate": 2.640725998205405e-06,
      "loss": 0.0502,
      "step": 6950
    },
    {
      "epoch": 1.0756050710718401,
      "grad_norm": 2.085265353151105,
      "learning_rate": 2.6072435932883176e-06,
      "loss": 0.051,
      "step": 7000
    },
    {
      "epoch": 1.0832885132539378,
      "grad_norm": 1.976028725217046,
      "learning_rate": 2.573741904502618e-06,
      "loss": 0.0504,
      "step": 7050
    },
    {
      "epoch": 1.0909719554360353,
      "grad_norm": 2.065251142420112,
      "learning_rate": 2.540226955911328e-06,
      "loss": 0.049,
      "step": 7100
    },
    {
      "epoch": 1.098655397618133,
      "grad_norm": 1.9418713732405541,
      "learning_rate": 2.506704773961766e-06,
      "loss": 0.0497,
      "step": 7150
    },
    {
      "epoch": 1.1063388398002305,
      "grad_norm": 1.9522929472314208,
      "learning_rate": 2.47318138640191e-06,
      "loss": 0.0529,
      "step": 7200
    },
    {
      "epoch": 1.1063388398002305,
      "eval_loss": 0.0720512717962265,
      "eval_runtime": 20.6576,
      "eval_samples_per_second": 178.191,
      "eval_steps_per_second": 5.615,
      "step": 7200
    },
    {
      "epoch": 1.114022281982328,
      "grad_norm": 1.8217795610624572,
      "learning_rate": 2.43966282119652e-06,
      "loss": 0.0498,
      "step": 7250
    },
    {
      "epoch": 1.1217057241644257,
      "grad_norm": 2.211182366390281,
      "learning_rate": 2.4061551054432303e-06,
      "loss": 0.0494,
      "step": 7300
    },
    {
      "epoch": 1.1293891663465232,
      "grad_norm": 1.836486973368603,
      "learning_rate": 2.3726642642887977e-06,
      "loss": 0.0484,
      "step": 7350
    },
    {
      "epoch": 1.137072608528621,
      "grad_norm": 2.0872257595622092,
      "learning_rate": 2.3391963198456945e-06,
      "loss": 0.0459,
      "step": 7400
    },
    {
      "epoch": 1.1447560507107184,
      "grad_norm": 2.3771230389437243,
      "learning_rate": 2.3057572901092464e-06,
      "loss": 0.0489,
      "step": 7450
    },
    {
      "epoch": 1.1524394928928159,
      "grad_norm": 1.7766409365365587,
      "learning_rate": 2.2723531878755235e-06,
      "loss": 0.0514,
      "step": 7500
    },
    {
      "epoch": 1.1601229350749136,
      "grad_norm": 2.0041896806524613,
      "learning_rate": 2.238990019660148e-06,
      "loss": 0.0525,
      "step": 7550
    },
    {
      "epoch": 1.167806377257011,
      "grad_norm": 1.4220794048926986,
      "learning_rate": 2.2056737846182465e-06,
      "loss": 0.0495,
      "step": 7600
    },
    {
      "epoch": 1.1754898194391088,
      "grad_norm": 1.4059664663552494,
      "learning_rate": 2.1724104734657164e-06,
      "loss": 0.0473,
      "step": 7650
    },
    {
      "epoch": 1.1831732616212063,
      "grad_norm": 2.143768097788525,
      "learning_rate": 2.139206067402016e-06,
      "loss": 0.0508,
      "step": 7700
    },
    {
      "epoch": 1.1908567038033038,
      "grad_norm": 2.439141254991488,
      "learning_rate": 2.1060665370346577e-06,
      "loss": 0.0457,
      "step": 7750
    },
    {
      "epoch": 1.1985401459854015,
      "grad_norm": 2.1497324805762426,
      "learning_rate": 2.0729978413056167e-06,
      "loss": 0.0487,
      "step": 7800
    },
    {
      "epoch": 1.1985401459854015,
      "eval_loss": 0.0716494545340538,
      "eval_runtime": 17.9357,
      "eval_samples_per_second": 205.233,
      "eval_steps_per_second": 6.468,
      "step": 7800
    },
    {
      "epoch": 1.206223588167499,
      "grad_norm": 1.6548972664409785,
      "learning_rate": 2.040005926419829e-06,
      "loss": 0.0476,
      "step": 7850
    },
    {
      "epoch": 1.2139070303495967,
      "grad_norm": 1.9929716522339607,
      "learning_rate": 2.0070967247759794e-06,
      "loss": 0.0507,
      "step": 7900
    },
    {
      "epoch": 1.2215904725316942,
      "grad_norm": 1.8920582106563741,
      "learning_rate": 1.974276153899781e-06,
      "loss": 0.0513,
      "step": 7950
    },
    {
      "epoch": 1.2292739147137919,
      "grad_norm": 1.6121165647032283,
      "learning_rate": 1.9415501153799173e-06,
      "loss": 0.0478,
      "step": 8000
    },
    {
      "epoch": 1.2369573568958894,
      "grad_norm": 1.8003625671448367,
      "learning_rate": 1.908924493806859e-06,
      "loss": 0.0489,
      "step": 8050
    },
    {
      "epoch": 1.2446407990779869,
      "grad_norm": 1.6939131088624355,
      "learning_rate": 1.8764051557147316e-06,
      "loss": 0.0451,
      "step": 8100
    },
    {
      "epoch": 1.2523242412600846,
      "grad_norm": 1.7468120490008145,
      "learning_rate": 1.8439979485264352e-06,
      "loss": 0.0498,
      "step": 8150
    },
    {
      "epoch": 1.260007683442182,
      "grad_norm": 2.5089815003929155,
      "learning_rate": 1.8117086995021942e-06,
      "loss": 0.0491,
      "step": 8200
    },
    {
      "epoch": 1.2676911256242795,
      "grad_norm": 2.368789113392285,
      "learning_rate": 1.7795432146917391e-06,
      "loss": 0.0454,
      "step": 8250
    },
    {
      "epoch": 1.2753745678063773,
      "grad_norm": 1.8775747763554873,
      "learning_rate": 1.7475072778902962e-06,
      "loss": 0.0506,
      "step": 8300
    },
    {
      "epoch": 1.283058009988475,
      "grad_norm": 1.7306644111557166,
      "learning_rate": 1.715606649598584e-06,
      "loss": 0.0492,
      "step": 8350
    },
    {
      "epoch": 1.2907414521705725,
      "grad_norm": 1.6336792554226593,
      "learning_rate": 1.6838470659869971e-06,
      "loss": 0.0488,
      "step": 8400
    },
    {
      "epoch": 1.2907414521705725,
      "eval_loss": 0.06931500881910324,
      "eval_runtime": 18.4535,
      "eval_samples_per_second": 199.475,
      "eval_steps_per_second": 6.286,
      "step": 8400
    },
    {
      "epoch": 1.29842489435267,
      "grad_norm": 2.9094369118459174,
      "learning_rate": 1.6522342378641587e-06,
      "loss": 0.0494,
      "step": 8450
    },
    {
      "epoch": 1.3061083365347677,
      "grad_norm": 2.184054614982441,
      "learning_rate": 1.620773849650048e-06,
      "loss": 0.0502,
      "step": 8500
    },
    {
      "epoch": 1.3137917787168651,
      "grad_norm": 2.1244045904997537,
      "learning_rate": 1.5894715583538528e-06,
      "loss": 0.0466,
      "step": 8550
    },
    {
      "epoch": 1.3214752208989626,
      "grad_norm": 2.4284504241807467,
      "learning_rate": 1.558332992556772e-06,
      "loss": 0.0493,
      "step": 8600
    },
    {
      "epoch": 1.3291586630810603,
      "grad_norm": 1.540633041404585,
      "learning_rate": 1.5273637513999112e-06,
      "loss": 0.0442,
      "step": 8650
    },
    {
      "epoch": 1.3368421052631578,
      "grad_norm": 1.8384007539282532,
      "learning_rate": 1.4965694035774864e-06,
      "loss": 0.0497,
      "step": 8700
    },
    {
      "epoch": 1.3445255474452555,
      "grad_norm": 1.7618498321526446,
      "learning_rate": 1.465955486335493e-06,
      "loss": 0.0458,
      "step": 8750
    },
    {
      "epoch": 1.352208989627353,
      "grad_norm": 2.231775422548153,
      "learning_rate": 1.435527504476033e-06,
      "loss": 0.0493,
      "step": 8800
    },
    {
      "epoch": 1.3598924318094507,
      "grad_norm": 1.7224514278527392,
      "learning_rate": 1.4052909293674792e-06,
      "loss": 0.0476,
      "step": 8850
    },
    {
      "epoch": 1.3675758739915482,
      "grad_norm": 1.8541565429096656,
      "learning_rate": 1.375251197960643e-06,
      "loss": 0.0488,
      "step": 8900
    },
    {
      "epoch": 1.3752593161736457,
      "grad_norm": 2.381490833870002,
      "learning_rate": 1.345413711811143e-06,
      "loss": 0.0494,
      "step": 8950
    },
    {
      "epoch": 1.3829427583557434,
      "grad_norm": 2.278959306976094,
      "learning_rate": 1.315783836108122e-06,
      "loss": 0.0494,
      "step": 9000
    },
    {
      "epoch": 1.3829427583557434,
      "eval_loss": 0.06873823702335358,
      "eval_runtime": 17.8999,
      "eval_samples_per_second": 205.644,
      "eval_steps_per_second": 6.48,
      "step": 9000
    },
    {
      "epoch": 1.390626200537841,
      "grad_norm": 2.1844843542778904,
      "learning_rate": 1.2863668987095232e-06,
      "loss": 0.046,
      "step": 9050
    },
    {
      "epoch": 1.3983096427199384,
      "grad_norm": 1.8497980702776782,
      "learning_rate": 1.2571681891840604e-06,
      "loss": 0.0457,
      "step": 9100
    },
    {
      "epoch": 1.4059930849020361,
      "grad_norm": 2.361384912110055,
      "learning_rate": 1.2281929578600818e-06,
      "loss": 0.0446,
      "step": 9150
    },
    {
      "epoch": 1.4136765270841338,
      "grad_norm": 1.7735596712796649,
      "learning_rate": 1.1994464148814944e-06,
      "loss": 0.0491,
      "step": 9200
    },
    {
      "epoch": 1.4213599692662313,
      "grad_norm": 2.5534458205266075,
      "learning_rate": 1.1709337292709006e-06,
      "loss": 0.0513,
      "step": 9250
    },
    {
      "epoch": 1.4290434114483288,
      "grad_norm": 1.9943434045148776,
      "learning_rate": 1.1426600280001452e-06,
      "loss": 0.0457,
      "step": 9300
    },
    {
      "epoch": 1.4367268536304265,
      "grad_norm": 1.6552094390596166,
      "learning_rate": 1.1146303950684086e-06,
      "loss": 0.0474,
      "step": 9350
    },
    {
      "epoch": 1.444410295812524,
      "grad_norm": 2.142802888711476,
      "learning_rate": 1.08684987058804e-06,
      "loss": 0.0477,
      "step": 9400
    },
    {
      "epoch": 1.4520937379946215,
      "grad_norm": 1.964334339120735,
      "learning_rate": 1.0593234498782707e-06,
      "loss": 0.0468,
      "step": 9450
    },
    {
      "epoch": 1.4597771801767192,
      "grad_norm": 1.6390715251269143,
      "learning_rate": 1.0320560825669907e-06,
      "loss": 0.049,
      "step": 9500
    },
    {
      "epoch": 1.4674606223588167,
      "grad_norm": 2.0864027751268814,
      "learning_rate": 1.0050526717007386e-06,
      "loss": 0.0438,
      "step": 9550
    },
    {
      "epoch": 1.4751440645409144,
      "grad_norm": 1.9215266401428566,
      "learning_rate": 9.783180728630606e-07,
      "loss": 0.0483,
      "step": 9600
    },
    {
      "epoch": 1.4751440645409144,
      "eval_loss": 0.06846572458744049,
      "eval_runtime": 17.9503,
      "eval_samples_per_second": 205.066,
      "eval_steps_per_second": 6.462,
      "step": 9600
    },
    {
      "epoch": 1.482827506723012,
      "grad_norm": 1.7928996170417553,
      "learning_rate": 9.518570933014182e-07,
      "loss": 0.0478,
      "step": 9650
    },
    {
      "epoch": 1.4905109489051096,
      "grad_norm": 1.9873792527502019,
      "learning_rate": 9.25674491062774e-07,
      "loss": 0.0469,
      "step": 9700
    },
    {
      "epoch": 1.498194391087207,
      "grad_norm": 1.8602555769335043,
      "learning_rate": 8.997749741380291e-07,
      "loss": 0.0478,
      "step": 9750
    },
    {
      "epoch": 1.5058778332693046,
      "grad_norm": 2.0982729436998677,
      "learning_rate": 8.741631996154651e-07,
      "loss": 0.0479,
      "step": 9800
    },
    {
      "epoch": 1.5135612754514023,
      "grad_norm": 1.698553296101875,
      "learning_rate": 8.488437728433346e-07,
      "loss": 0.0453,
      "step": 9850
    },
    {
      "epoch": 1.5212447176334998,
      "grad_norm": 1.7327244928480596,
      "learning_rate": 8.238212466017536e-07,
      "loss": 0.0453,
      "step": 9900
    },
    {
      "epoch": 1.5289281598155973,
      "grad_norm": 2.5157076501848787,
      "learning_rate": 7.99100120284054e-07,
      "loss": 0.0469,
      "step": 9950
    },
    {
      "epoch": 1.536611601997695,
      "grad_norm": 2.2123154799862776,
      "learning_rate": 7.746848390877282e-07,
      "loss": 0.0474,
      "step": 10000
    },
    {
      "epoch": 1.5442950441797927,
      "grad_norm": 2.1165949034476106,
      "learning_rate": 7.505797932151198e-07,
      "loss": 0.0501,
      "step": 10050
    },
    {
      "epoch": 1.5519784863618902,
      "grad_norm": 1.9622924083143274,
      "learning_rate": 7.267893170840104e-07,
      "loss": 0.0472,
      "step": 10100
    },
    {
      "epoch": 1.5596619285439877,
      "grad_norm": 2.0167441488769984,
      "learning_rate": 7.033176885482257e-07,
      "loss": 0.0466,
      "step": 10150
    },
    {
      "epoch": 1.5673453707260854,
      "grad_norm": 2.583323639603209,
      "learning_rate": 6.801691281284243e-07,
      "loss": 0.0448,
      "step": 10200
    },
    {
      "epoch": 1.5673453707260854,
      "eval_loss": 0.06800152361392975,
      "eval_runtime": 17.9086,
      "eval_samples_per_second": 205.544,
      "eval_steps_per_second": 6.477,
      "step": 10200
    },
    {
      "epoch": 1.5750288129081829,
      "grad_norm": 2.320026361459745,
      "learning_rate": 6.573477982531845e-07,
      "loss": 0.0457,
      "step": 10250
    },
    {
      "epoch": 1.5827122550902804,
      "grad_norm": 1.8563171575104342,
      "learning_rate": 6.348578025105487e-07,
      "loss": 0.0445,
      "step": 10300
    },
    {
      "epoch": 1.590395697272378,
      "grad_norm": 2.4148912012974337,
      "learning_rate": 6.12703184910138e-07,
      "loss": 0.0458,
      "step": 10350
    },
    {
      "epoch": 1.5980791394544756,
      "grad_norm": 2.0191213809598416,
      "learning_rate": 5.908879291559835e-07,
      "loss": 0.0465,
      "step": 10400
    }
  ],
  "logging_steps": 50,
  "max_steps": 13016,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 2604,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 248681020006400.0,
  "train_batch_size": 28,
  "trial_name": null,
  "trial_params": null
}