{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9998389434691577,
"eval_steps": 500,
"global_step": 4656,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.010737102056155044,
"grad_norm": 59.99636459350586,
"learning_rate": 3.5714285714285718e-06,
"loss": 12.7755,
"step": 50
},
{
"epoch": 0.02147420411231009,
"grad_norm": 47.23111343383789,
"learning_rate": 7.1428571428571436e-06,
"loss": 7.8117,
"step": 100
},
{
"epoch": 0.03221130616846513,
"grad_norm": 47.798980712890625,
"learning_rate": 9.999879015387978e-06,
"loss": 7.1417,
"step": 150
},
{
"epoch": 0.04294840822462018,
"grad_norm": 37.97929763793945,
"learning_rate": 9.995645168701038e-06,
"loss": 5.285,
"step": 200
},
{
"epoch": 0.05368551028077522,
"grad_norm": 46.82593536376953,
"learning_rate": 9.98536794504998e-06,
"loss": 5.5196,
"step": 250
},
{
"epoch": 0.06442261233693027,
"grad_norm": 47.67216873168945,
"learning_rate": 9.969059777090564e-06,
"loss": 5.5595,
"step": 300
},
{
"epoch": 0.0751597143930853,
"grad_norm": 47.013511657714844,
"learning_rate": 9.946740393286928e-06,
"loss": 5.0251,
"step": 350
},
{
"epoch": 0.08589681644924035,
"grad_norm": 24.63237190246582,
"learning_rate": 9.918436794045507e-06,
"loss": 4.7625,
"step": 400
},
{
"epoch": 0.09663391850539539,
"grad_norm": 22.05866813659668,
"learning_rate": 9.884183219051837e-06,
"loss": 5.1414,
"step": 450
},
{
"epoch": 0.10737102056155044,
"grad_norm": 21.014677047729492,
"learning_rate": 9.844021105849837e-06,
"loss": 3.8645,
"step": 500
},
{
"epoch": 0.11810812261770548,
"grad_norm": 43.38600540161133,
"learning_rate": 9.797999039713586e-06,
"loss": 4.6035,
"step": 550
},
{
"epoch": 0.12884522467386053,
"grad_norm": 22.228700637817383,
"learning_rate": 9.746172694872332e-06,
"loss": 4.1814,
"step": 600
},
{
"epoch": 0.13958232673001558,
"grad_norm": 23.762821197509766,
"learning_rate": 9.688604767159736e-06,
"loss": 4.6889,
"step": 650
},
{
"epoch": 0.1503194287861706,
"grad_norm": 20.935850143432617,
"learning_rate": 9.62536489816892e-06,
"loss": 4.8945,
"step": 700
},
{
"epoch": 0.16105653084232566,
"grad_norm": 19.82425880432129,
"learning_rate": 9.556529591005001e-06,
"loss": 4.494,
"step": 750
},
{
"epoch": 0.1717936328984807,
"grad_norm": 22.668556213378906,
"learning_rate": 9.482182117737066e-06,
"loss": 4.4041,
"step": 800
},
{
"epoch": 0.18253073495463573,
"grad_norm": 22.826379776000977,
"learning_rate": 9.402412418661541e-06,
"loss": 4.8089,
"step": 850
},
{
"epoch": 0.19326783701079078,
"grad_norm": 19.665138244628906,
"learning_rate": 9.317316993498788e-06,
"loss": 4.232,
"step": 900
},
{
"epoch": 0.20400493906694583,
"grad_norm": 21.231645584106445,
"learning_rate": 9.226998784654606e-06,
"loss": 3.7319,
"step": 950
},
{
"epoch": 0.21474204112310089,
"grad_norm": 37.30881118774414,
"learning_rate": 9.131567052687811e-06,
"loss": 4.4938,
"step": 1000
},
{
"epoch": 0.2254791431792559,
"grad_norm": 23.012168884277344,
"learning_rate": 9.03113724413456e-06,
"loss": 4.9744,
"step": 1050
},
{
"epoch": 0.23621624523541096,
"grad_norm": 25.494842529296875,
"learning_rate": 8.925830851849338e-06,
"loss": 3.9475,
"step": 1100
},
{
"epoch": 0.246953347291566,
"grad_norm": 21.22249412536621,
"learning_rate": 8.815775268031514e-06,
"loss": 4.3448,
"step": 1150
},
{
"epoch": 0.25769044934772106,
"grad_norm": 40.145023345947266,
"learning_rate": 8.701103630115303e-06,
"loss": 4.4259,
"step": 1200
},
{
"epoch": 0.2684275514038761,
"grad_norm": 17.972578048706055,
"learning_rate": 8.581954659709549e-06,
"loss": 4.7292,
"step": 1250
},
{
"epoch": 0.27916465346003116,
"grad_norm": 19.54686737060547,
"learning_rate": 8.458472494782169e-06,
"loss": 4.4949,
"step": 1300
},
{
"epoch": 0.2899017555161862,
"grad_norm": 39.886653900146484,
"learning_rate": 8.330806515292271e-06,
"loss": 4.1289,
"step": 1350
},
{
"epoch": 0.3006388575723412,
"grad_norm": 19.24032974243164,
"learning_rate": 8.199111162480871e-06,
"loss": 4.6771,
"step": 1400
},
{
"epoch": 0.3113759596284963,
"grad_norm": 18.134519577026367,
"learning_rate": 8.063545752038854e-06,
"loss": 3.7991,
"step": 1450
},
{
"epoch": 0.3221130616846513,
"grad_norm": 25.192407608032227,
"learning_rate": 7.924274281378153e-06,
"loss": 3.6166,
"step": 1500
},
{
"epoch": 0.33285016374080634,
"grad_norm": 22.86334800720215,
"learning_rate": 7.781465231239318e-06,
"loss": 5.0183,
"step": 1550
},
{
"epoch": 0.3435872657969614,
"grad_norm": 17.691837310791016,
"learning_rate": 7.635291361875474e-06,
"loss": 4.2544,
"step": 1600
},
{
"epoch": 0.35432436785311644,
"grad_norm": 21.726091384887695,
"learning_rate": 7.485929504059234e-06,
"loss": 4.2973,
"step": 1650
},
{
"epoch": 0.36506146990927146,
"grad_norm": 17.235157012939453,
"learning_rate": 7.333560345165371e-06,
"loss": 3.714,
"step": 1700
},
{
"epoch": 0.37579857196542654,
"grad_norm": 16.26618194580078,
"learning_rate": 7.178368210588067e-06,
"loss": 3.5892,
"step": 1750
},
{
"epoch": 0.38653567402158157,
"grad_norm": 14.692360877990723,
"learning_rate": 7.020540840757124e-06,
"loss": 4.577,
"step": 1800
},
{
"epoch": 0.39727277607773664,
"grad_norm": 20.708250045776367,
"learning_rate": 6.860269164022921e-06,
"loss": 4.4776,
"step": 1850
},
{
"epoch": 0.40800987813389167,
"grad_norm": 17.772565841674805,
"learning_rate": 6.697747065684851e-06,
"loss": 4.6948,
"step": 1900
},
{
"epoch": 0.4187469801900467,
"grad_norm": 17.883790969848633,
"learning_rate": 6.5331711534426326e-06,
"loss": 4.0064,
"step": 1950
},
{
"epoch": 0.42948408224620177,
"grad_norm": 20.181188583374023,
"learning_rate": 6.366740519554286e-06,
"loss": 3.9072,
"step": 2000
},
{
"epoch": 0.4402211843023568,
"grad_norm": 37.15888214111328,
"learning_rate": 6.198656499988444e-06,
"loss": 4.2209,
"step": 2050
},
{
"epoch": 0.4509582863585118,
"grad_norm": 17.573535919189453,
"learning_rate": 6.029122430862373e-06,
"loss": 4.8097,
"step": 2100
},
{
"epoch": 0.4616953884146669,
"grad_norm": 38.13616943359375,
"learning_rate": 5.858343402460391e-06,
"loss": 4.0292,
"step": 2150
},
{
"epoch": 0.4724324904708219,
"grad_norm": 17.187131881713867,
"learning_rate": 5.68652601113019e-06,
"loss": 3.7332,
"step": 2200
},
{
"epoch": 0.48316959252697694,
"grad_norm": 35.56867599487305,
"learning_rate": 5.513878109357228e-06,
"loss": 4.2786,
"step": 2250
},
{
"epoch": 0.493906694583132,
"grad_norm": 18.53168487548828,
"learning_rate": 5.3406085543195555e-06,
"loss": 4.8574,
"step": 2300
},
{
"epoch": 0.504643796639287,
"grad_norm": 20.49349594116211,
"learning_rate": 5.166926955227224e-06,
"loss": 4.9073,
"step": 2350
},
{
"epoch": 0.5153808986954421,
"grad_norm": 19.841901779174805,
"learning_rate": 4.993043419751933e-06,
"loss": 3.827,
"step": 2400
},
{
"epoch": 0.5261180007515971,
"grad_norm": 19.35201644897461,
"learning_rate": 4.8191682998536905e-06,
"loss": 3.4893,
"step": 2450
},
{
"epoch": 0.5368551028077522,
"grad_norm": 21.604345321655273,
"learning_rate": 4.645511937311934e-06,
"loss": 4.367,
"step": 2500
},
{
"epoch": 0.5475922048639073,
"grad_norm": 40.630821228027344,
"learning_rate": 4.472284409268976e-06,
"loss": 5.0816,
"step": 2550
},
{
"epoch": 0.5583293069200623,
"grad_norm": 18.870628356933594,
"learning_rate": 4.299695274093593e-06,
"loss": 4.8803,
"step": 2600
},
{
"epoch": 0.5690664089762173,
"grad_norm": 21.05728530883789,
"learning_rate": 4.1279533178721755e-06,
"loss": 4.6022,
"step": 2650
},
{
"epoch": 0.5798035110323724,
"grad_norm": 39.68675231933594,
"learning_rate": 3.957266301834145e-06,
"loss": 4.212,
"step": 2700
},
{
"epoch": 0.5905406130885275,
"grad_norm": 17.26553726196289,
"learning_rate": 3.7878407110171646e-06,
"loss": 4.0448,
"step": 2750
},
{
"epoch": 0.6012777151446824,
"grad_norm": 21.71043586730957,
"learning_rate": 3.6198815044761847e-06,
"loss": 4.6691,
"step": 2800
},
{
"epoch": 0.6120148172008375,
"grad_norm": 21.753311157226562,
"learning_rate": 3.4535918673385456e-06,
"loss": 4.3453,
"step": 2850
},
{
"epoch": 0.6227519192569926,
"grad_norm": 18.6822452545166,
"learning_rate": 3.2891729650050096e-06,
"loss": 4.2042,
"step": 2900
},
{
"epoch": 0.6334890213131475,
"grad_norm": 18.38760757446289,
"learning_rate": 3.1268236997941535e-06,
"loss": 3.8025,
"step": 2950
},
{
"epoch": 0.6442261233693026,
"grad_norm": 39.5139045715332,
"learning_rate": 2.966740470324451e-06,
"loss": 5.1229,
"step": 3000
},
{
"epoch": 0.6549632254254577,
"grad_norm": 20.45186424255371,
"learning_rate": 2.8091169339251644e-06,
"loss": 4.0329,
"step": 3050
},
{
"epoch": 0.6657003274816127,
"grad_norm": 18.005401611328125,
"learning_rate": 2.654143772363455e-06,
"loss": 4.0975,
"step": 3100
},
{
"epoch": 0.6764374295377678,
"grad_norm": 21.24009895324707,
"learning_rate": 2.502008461171114e-06,
"loss": 3.9577,
"step": 3150
},
{
"epoch": 0.6871745315939228,
"grad_norm": 18.686386108398438,
"learning_rate": 2.352895042849965e-06,
"loss": 3.9965,
"step": 3200
},
{
"epoch": 0.6979116336500778,
"grad_norm": 24.032150268554688,
"learning_rate": 2.20698390423032e-06,
"loss": 4.6225,
"step": 3250
},
{
"epoch": 0.7086487357062329,
"grad_norm": 18.239240646362305,
"learning_rate": 2.0644515582517803e-06,
"loss": 4.2193,
"step": 3300
},
{
"epoch": 0.719385837762388,
"grad_norm": 21.52141571044922,
"learning_rate": 1.9254704304304174e-06,
"loss": 4.5206,
"step": 3350
},
{
"epoch": 0.7301229398185429,
"grad_norm": 20.970258712768555,
"learning_rate": 1.7902086502706256e-06,
"loss": 3.7111,
"step": 3400
},
{
"epoch": 0.740860041874698,
"grad_norm": 17.317781448364258,
"learning_rate": 1.658829847873965e-06,
"loss": 4.1838,
"step": 3450
},
{
"epoch": 0.7515971439308531,
"grad_norm": 19.438854217529297,
"learning_rate": 1.5314929559910985e-06,
"loss": 4.3512,
"step": 3500
},
{
"epoch": 0.762334245987008,
"grad_norm": 26.218725204467773,
"learning_rate": 1.4083520177562154e-06,
"loss": 3.1841,
"step": 3550
},
{
"epoch": 0.7730713480431631,
"grad_norm": 26.113292694091797,
"learning_rate": 1.2895560003365837e-06,
"loss": 4.6759,
"step": 3600
},
{
"epoch": 0.7838084500993182,
"grad_norm": 20.18511962890625,
"learning_rate": 1.1752486147226505e-06,
"loss": 3.9116,
"step": 3650
},
{
"epoch": 0.7945455521554733,
"grad_norm": 21.312776565551758,
"learning_rate": 1.0655681418766772e-06,
"loss": 4.0108,
"step": 3700
},
{
"epoch": 0.8052826542116283,
"grad_norm": 23.473669052124023,
"learning_rate": 9.60647265450249e-07,
"loss": 3.5384,
"step": 3750
},
{
"epoch": 0.8160197562677833,
"grad_norm": 34.65909957885742,
"learning_rate": 8.60612911273011e-07,
"loss": 4.4025,
"step": 3800
},
{
"epoch": 0.8267568583239384,
"grad_norm": 18.83558464050293,
"learning_rate": 7.655860938068071e-07,
"loss": 3.2632,
"step": 3850
},
{
"epoch": 0.8374939603800934,
"grad_norm": 21.728172302246094,
"learning_rate": 6.756817697509755e-07,
"loss": 4.0358,
"step": 3900
},
{
"epoch": 0.8482310624362485,
"grad_norm": 18.24883460998535,
"learning_rate": 5.910086989758862e-07,
"loss": 3.4722,
"step": 3950
},
{
"epoch": 0.8589681644924035,
"grad_norm": 16.885324478149414,
"learning_rate": 5.11669312952977e-07,
"loss": 4.0926,
"step": 4000
},
{
"epoch": 0.8697052665485585,
"grad_norm": 16.516597747802734,
"learning_rate": 4.377595908404225e-07,
"loss": 3.9504,
"step": 4050
},
{
"epoch": 0.8804423686047136,
"grad_norm": 17.790292739868164,
"learning_rate": 3.693689433743658e-07,
"loss": 4.0759,
"step": 4100
},
{
"epoch": 0.8911794706608687,
"grad_norm": 18.888835906982422,
"learning_rate": 3.065801047061517e-07,
"loss": 3.3214,
"step": 4150
},
{
"epoch": 0.9019165727170236,
"grad_norm": 36.61859893798828,
"learning_rate": 2.4946903231642727e-07,
"loss": 4.5779,
"step": 4200
},
{
"epoch": 0.9126536747731787,
"grad_norm": 21.771671295166016,
"learning_rate": 1.9810481512716638e-07,
"loss": 3.8909,
"step": 4250
},
{
"epoch": 0.9233907768293338,
"grad_norm": 19.28317642211914,
"learning_rate": 1.5254958992280022e-07,
"loss": 3.9403,
"step": 4300
},
{
"epoch": 0.9341278788854888,
"grad_norm": 16.99443817138672,
"learning_rate": 1.128584661815435e-07,
"loss": 3.7742,
"step": 4350
},
{
"epoch": 0.9448649809416438,
"grad_norm": 20.192611694335938,
"learning_rate": 7.907945940786033e-08,
"loss": 3.7403,
"step": 4400
},
{
"epoch": 0.9556020829977989,
"grad_norm": 18.506389617919922,
"learning_rate": 5.125343304671459e-08,
"loss": 5.146,
"step": 4450
},
{
"epoch": 0.9663391850539539,
"grad_norm": 20.306739807128906,
"learning_rate": 2.9414049049872883e-08,
"loss": 3.5826,
"step": 4500
},
{
"epoch": 0.977076287110109,
"grad_norm": 22.654619216918945,
"learning_rate": 1.35877271540652e-08,
"loss": 4.2733,
"step": 4550
},
{
"epoch": 0.987813389166264,
"grad_norm": 18.207374572753906,
"learning_rate": 3.7936129202648106e-09,
"loss": 3.4258,
"step": 4600
},
{
"epoch": 0.9985504912224191,
"grad_norm": 25.076688766479492,
"learning_rate": 4.3554572743409463e-11,
"loss": 4.9966,
"step": 4650
},
{
"epoch": 0.9998389434691577,
"step": 4656,
"total_flos": 3.3921983034765083e+19,
"train_loss": 4.446265889606934,
"train_runtime": 69353.2916,
"train_samples_per_second": 4.297,
"train_steps_per_second": 0.067
}
],
"logging_steps": 50,
"max_steps": 4656,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.3921983034765083e+19,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}