webagent/agenttrek-llama/trainer_state.json
{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0135203650498563,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.006760182524928173,
"grad_norm": 30.429955125606906,
"learning_rate": 2.0270270270270273e-07,
"loss": 1.0305,
"step": 10
},
{
"epoch": 0.013520365049856346,
"grad_norm": 11.031569316357162,
"learning_rate": 4.27927927927928e-07,
"loss": 0.9433,
"step": 20
},
{
"epoch": 0.020280547574784518,
"grad_norm": 7.568247729610925,
"learning_rate": 6.531531531531532e-07,
"loss": 0.7961,
"step": 30
},
{
"epoch": 0.027040730099712692,
"grad_norm": 7.13506773134569,
"learning_rate": 8.783783783783785e-07,
"loss": 0.6703,
"step": 40
},
{
"epoch": 0.03380091262464086,
"grad_norm": 5.13118234249973,
"learning_rate": 1.1036036036036037e-06,
"loss": 0.5868,
"step": 50
},
{
"epoch": 0.040561095149569036,
"grad_norm": 5.539727298943678,
"learning_rate": 1.328828828828829e-06,
"loss": 0.5074,
"step": 60
},
{
"epoch": 0.04732127767449721,
"grad_norm": 5.305287069709931,
"learning_rate": 1.5540540540540541e-06,
"loss": 0.4983,
"step": 70
},
{
"epoch": 0.054081460199425384,
"grad_norm": 5.106611798846999,
"learning_rate": 1.7792792792792792e-06,
"loss": 0.497,
"step": 80
},
{
"epoch": 0.06084164272435356,
"grad_norm": 4.582177338515729,
"learning_rate": 2.0045045045045045e-06,
"loss": 0.4695,
"step": 90
},
{
"epoch": 0.06760182524928172,
"grad_norm": 4.739272582163485,
"learning_rate": 2.22972972972973e-06,
"loss": 0.4408,
"step": 100
},
{
"epoch": 0.0743620077742099,
"grad_norm": 4.322112804201578,
"learning_rate": 2.454954954954955e-06,
"loss": 0.4215,
"step": 110
},
{
"epoch": 0.08112219029913807,
"grad_norm": 4.480237396840778,
"learning_rate": 2.6801801801801803e-06,
"loss": 0.4222,
"step": 120
},
{
"epoch": 0.08788237282406625,
"grad_norm": 4.588802640700157,
"learning_rate": 2.9054054054054054e-06,
"loss": 0.4297,
"step": 130
},
{
"epoch": 0.09464255534899442,
"grad_norm": 3.634384371273362,
"learning_rate": 3.130630630630631e-06,
"loss": 0.4132,
"step": 140
},
{
"epoch": 0.1014027378739226,
"grad_norm": 4.029617898426818,
"learning_rate": 3.3558558558558565e-06,
"loss": 0.4123,
"step": 150
},
{
"epoch": 0.10816292039885077,
"grad_norm": 3.6780967071398005,
"learning_rate": 3.5810810810810816e-06,
"loss": 0.4022,
"step": 160
},
{
"epoch": 0.11492310292377894,
"grad_norm": 3.8355903508595155,
"learning_rate": 3.8063063063063067e-06,
"loss": 0.417,
"step": 170
},
{
"epoch": 0.12168328544870712,
"grad_norm": 3.8796259132382604,
"learning_rate": 4.031531531531531e-06,
"loss": 0.4137,
"step": 180
},
{
"epoch": 0.1284434679736353,
"grad_norm": 3.513646073103006,
"learning_rate": 4.256756756756757e-06,
"loss": 0.3838,
"step": 190
},
{
"epoch": 0.13520365049856345,
"grad_norm": 3.280829869725974,
"learning_rate": 4.4819819819819824e-06,
"loss": 0.3933,
"step": 200
},
{
"epoch": 0.14196383302349164,
"grad_norm": 3.8684728373816877,
"learning_rate": 4.707207207207208e-06,
"loss": 0.418,
"step": 210
},
{
"epoch": 0.1487240155484198,
"grad_norm": 3.430012571317074,
"learning_rate": 4.932432432432433e-06,
"loss": 0.4068,
"step": 220
},
{
"epoch": 0.15548419807334798,
"grad_norm": 3.4236933785478594,
"learning_rate": 5.157657657657657e-06,
"loss": 0.4122,
"step": 230
},
{
"epoch": 0.16224438059827614,
"grad_norm": 3.337506294944149,
"learning_rate": 5.382882882882884e-06,
"loss": 0.4195,
"step": 240
},
{
"epoch": 0.16900456312320433,
"grad_norm": 4.340322941603959,
"learning_rate": 5.608108108108109e-06,
"loss": 0.3922,
"step": 250
},
{
"epoch": 0.1757647456481325,
"grad_norm": 3.079365176995162,
"learning_rate": 5.833333333333334e-06,
"loss": 0.3971,
"step": 260
},
{
"epoch": 0.18252492817306068,
"grad_norm": 3.1303889674723857,
"learning_rate": 6.0585585585585595e-06,
"loss": 0.4135,
"step": 270
},
{
"epoch": 0.18928511069798884,
"grad_norm": 3.2972741045149605,
"learning_rate": 6.283783783783784e-06,
"loss": 0.4021,
"step": 280
},
{
"epoch": 0.19604529322291703,
"grad_norm": 6.693630492235905,
"learning_rate": 6.50900900900901e-06,
"loss": 0.4159,
"step": 290
},
{
"epoch": 0.2028054757478452,
"grad_norm": 2.9729373757056456,
"learning_rate": 6.734234234234235e-06,
"loss": 0.4058,
"step": 300
},
{
"epoch": 0.20956565827277338,
"grad_norm": 2.95948849243511,
"learning_rate": 6.95945945945946e-06,
"loss": 0.4202,
"step": 310
},
{
"epoch": 0.21632584079770154,
"grad_norm": 2.8167543124734618,
"learning_rate": 7.1846846846846855e-06,
"loss": 0.4018,
"step": 320
},
{
"epoch": 0.22308602332262972,
"grad_norm": 2.8063934885840314,
"learning_rate": 7.40990990990991e-06,
"loss": 0.3997,
"step": 330
},
{
"epoch": 0.22984620584755788,
"grad_norm": 3.1516244053144,
"learning_rate": 7.635135135135135e-06,
"loss": 0.4172,
"step": 340
},
{
"epoch": 0.23660638837248607,
"grad_norm": 2.6885724493695715,
"learning_rate": 7.860360360360361e-06,
"loss": 0.3972,
"step": 350
},
{
"epoch": 0.24336657089741423,
"grad_norm": 2.6078839515038528,
"learning_rate": 8.085585585585586e-06,
"loss": 0.4076,
"step": 360
},
{
"epoch": 0.2501267534223424,
"grad_norm": 2.5241273124658434,
"learning_rate": 8.31081081081081e-06,
"loss": 0.4114,
"step": 370
},
{
"epoch": 0.2568869359472706,
"grad_norm": 2.811459731213271,
"learning_rate": 8.536036036036037e-06,
"loss": 0.397,
"step": 380
},
{
"epoch": 0.26364711847219874,
"grad_norm": 3.049745389493281,
"learning_rate": 8.761261261261262e-06,
"loss": 0.4213,
"step": 390
},
{
"epoch": 0.2704073009971269,
"grad_norm": 2.7296156561054303,
"learning_rate": 8.986486486486488e-06,
"loss": 0.42,
"step": 400
},
{
"epoch": 0.2771674835220551,
"grad_norm": 2.8072799681272285,
"learning_rate": 9.211711711711713e-06,
"loss": 0.4115,
"step": 410
},
{
"epoch": 0.2839276660469833,
"grad_norm": 2.45386826986572,
"learning_rate": 9.436936936936937e-06,
"loss": 0.4276,
"step": 420
},
{
"epoch": 0.29068784857191143,
"grad_norm": 2.6328350388772703,
"learning_rate": 9.662162162162164e-06,
"loss": 0.4195,
"step": 430
},
{
"epoch": 0.2974480310968396,
"grad_norm": 2.5446377303913392,
"learning_rate": 9.887387387387388e-06,
"loss": 0.4181,
"step": 440
},
{
"epoch": 0.3042082136217678,
"grad_norm": 2.5969527355663975,
"learning_rate": 9.999961369685454e-06,
"loss": 0.4128,
"step": 450
},
{
"epoch": 0.31096839614669597,
"grad_norm": 2.771780495890244,
"learning_rate": 9.999652330750595e-06,
"loss": 0.4055,
"step": 460
},
{
"epoch": 0.31772857867162413,
"grad_norm": 2.4553977895620336,
"learning_rate": 9.99903427198204e-06,
"loss": 0.4012,
"step": 470
},
{
"epoch": 0.3244887611965523,
"grad_norm": 2.4098745339341456,
"learning_rate": 9.998107231580925e-06,
"loss": 0.3968,
"step": 480
},
{
"epoch": 0.3312489437214805,
"grad_norm": 2.545785489303096,
"learning_rate": 9.99687126684601e-06,
"loss": 0.4192,
"step": 490
},
{
"epoch": 0.33800912624640866,
"grad_norm": 2.6063200322372575,
"learning_rate": 9.995326454170132e-06,
"loss": 0.4109,
"step": 500
},
{
"epoch": 0.33800912624640866,
"eval_loss": 0.41546937823295593,
"eval_runtime": 1323.9234,
"eval_samples_per_second": 3.973,
"eval_steps_per_second": 0.497,
"step": 500
},
{
"epoch": 0.3447693087713368,
"grad_norm": 2.5250471936075582,
"learning_rate": 9.993472889035478e-06,
"loss": 0.4122,
"step": 510
},
{
"epoch": 0.351529491296265,
"grad_norm": 2.226305697507818,
"learning_rate": 9.991310686007694e-06,
"loss": 0.4068,
"step": 520
},
{
"epoch": 0.3582896738211932,
"grad_norm": 2.305497784945858,
"learning_rate": 9.988839978728798e-06,
"loss": 0.3922,
"step": 530
},
{
"epoch": 0.36504985634612136,
"grad_norm": 2.2810190229102267,
"learning_rate": 9.986060919908917e-06,
"loss": 0.4038,
"step": 540
},
{
"epoch": 0.3718100388710495,
"grad_norm": 2.0830775327743534,
"learning_rate": 9.982973681316854e-06,
"loss": 0.389,
"step": 550
},
{
"epoch": 0.3785702213959777,
"grad_norm": 2.412349310792309,
"learning_rate": 9.97957845376947e-06,
"loss": 0.4,
"step": 560
},
{
"epoch": 0.38533040392090584,
"grad_norm": 2.334930606027013,
"learning_rate": 9.975875447119884e-06,
"loss": 0.3925,
"step": 570
},
{
"epoch": 0.39209058644583406,
"grad_norm": 2.373633114437491,
"learning_rate": 9.971864890244514e-06,
"loss": 0.4038,
"step": 580
},
{
"epoch": 0.3988507689707622,
"grad_norm": 2.309096972224613,
"learning_rate": 9.967547031028917e-06,
"loss": 0.4008,
"step": 590
},
{
"epoch": 0.4056109514956904,
"grad_norm": 2.140294095106148,
"learning_rate": 9.962922136352482e-06,
"loss": 0.4004,
"step": 600
},
{
"epoch": 0.41237113402061853,
"grad_norm": 2.2849655710928287,
"learning_rate": 9.957990492071917e-06,
"loss": 0.4057,
"step": 610
},
{
"epoch": 0.41913131654554675,
"grad_norm": 2.092617163224422,
"learning_rate": 9.9527524030036e-06,
"loss": 0.4016,
"step": 620
},
{
"epoch": 0.4258914990704749,
"grad_norm": 2.786668476771917,
"learning_rate": 9.947208192904722e-06,
"loss": 0.3788,
"step": 630
},
{
"epoch": 0.43265168159540307,
"grad_norm": 2.1368728170135873,
"learning_rate": 9.941358204453294e-06,
"loss": 0.4022,
"step": 640
},
{
"epoch": 0.43941186412033123,
"grad_norm": 2.03479655017632,
"learning_rate": 9.935202799226941e-06,
"loss": 0.3795,
"step": 650
},
{
"epoch": 0.44617204664525945,
"grad_norm": 2.1100436195870964,
"learning_rate": 9.928742357680586e-06,
"loss": 0.3841,
"step": 660
},
{
"epoch": 0.4529322291701876,
"grad_norm": 2.1376422874279952,
"learning_rate": 9.9219772791229e-06,
"loss": 0.3809,
"step": 670
},
{
"epoch": 0.45969241169511577,
"grad_norm": 2.2131150187295865,
"learning_rate": 9.914907981691656e-06,
"loss": 0.3934,
"step": 680
},
{
"epoch": 0.4664525942200439,
"grad_norm": 2.5997656656639023,
"learning_rate": 9.907534902327855e-06,
"loss": 0.3975,
"step": 690
},
{
"epoch": 0.47321277674497214,
"grad_norm": 1.9856693804357932,
"learning_rate": 9.899858496748738e-06,
"loss": 0.38,
"step": 700
},
{
"epoch": 0.4799729592699003,
"grad_norm": 2.217516603205192,
"learning_rate": 9.891879239419609e-06,
"loss": 0.3789,
"step": 710
},
{
"epoch": 0.48673314179482846,
"grad_norm": 2.0702819723867427,
"learning_rate": 9.883597623524518e-06,
"loss": 0.3954,
"step": 720
},
{
"epoch": 0.4934933243197566,
"grad_norm": 2.2207526501051387,
"learning_rate": 9.875014160935773e-06,
"loss": 0.3816,
"step": 730
},
{
"epoch": 0.5002535068446848,
"grad_norm": 2.0307571044715487,
"learning_rate": 9.866129382182295e-06,
"loss": 0.3596,
"step": 740
},
{
"epoch": 0.507013689369613,
"grad_norm": 2.0484361446295756,
"learning_rate": 9.856943836416844e-06,
"loss": 0.3723,
"step": 750
},
{
"epoch": 0.5137738718945412,
"grad_norm": 2.188933520806837,
"learning_rate": 9.847458091382057e-06,
"loss": 0.3836,
"step": 760
},
{
"epoch": 0.5205340544194693,
"grad_norm": 2.2059318749133343,
"learning_rate": 9.837672733375377e-06,
"loss": 0.3764,
"step": 770
},
{
"epoch": 0.5272942369443975,
"grad_norm": 2.0738552350322657,
"learning_rate": 9.827588367212797e-06,
"loss": 0.3801,
"step": 780
},
{
"epoch": 0.5340544194693256,
"grad_norm": 2.049788797772694,
"learning_rate": 9.81720561619149e-06,
"loss": 0.3732,
"step": 790
},
{
"epoch": 0.5408146019942538,
"grad_norm": 2.607021547053881,
"learning_rate": 9.806525122051276e-06,
"loss": 0.3607,
"step": 800
},
{
"epoch": 0.5475747845191821,
"grad_norm": 2.0297130039478573,
"learning_rate": 9.795547544934964e-06,
"loss": 0.3756,
"step": 810
},
{
"epoch": 0.5543349670441102,
"grad_norm": 2.135186067946862,
"learning_rate": 9.784273563347542e-06,
"loss": 0.3597,
"step": 820
},
{
"epoch": 0.5610951495690384,
"grad_norm": 1.8995362331712584,
"learning_rate": 9.772703874114246e-06,
"loss": 0.3694,
"step": 830
},
{
"epoch": 0.5678553320939665,
"grad_norm": 2.0415002686577775,
"learning_rate": 9.760839192337487e-06,
"loss": 0.3799,
"step": 840
},
{
"epoch": 0.5746155146188947,
"grad_norm": 2.1273417637254366,
"learning_rate": 9.74868025135266e-06,
"loss": 0.3771,
"step": 850
},
{
"epoch": 0.5813756971438229,
"grad_norm": 2.1781086970923575,
"learning_rate": 9.7362278026828e-06,
"loss": 0.3817,
"step": 860
},
{
"epoch": 0.588135879668751,
"grad_norm": 2.1564198664014667,
"learning_rate": 9.723482615992153e-06,
"loss": 0.3755,
"step": 870
},
{
"epoch": 0.5948960621936792,
"grad_norm": 2.2895864922611993,
"learning_rate": 9.710445479038585e-06,
"loss": 0.379,
"step": 880
},
{
"epoch": 0.6016562447186073,
"grad_norm": 1.983081982245503,
"learning_rate": 9.697117197624903e-06,
"loss": 0.3648,
"step": 890
},
{
"epoch": 0.6084164272435356,
"grad_norm": 2.091041703194283,
"learning_rate": 9.683498595549058e-06,
"loss": 0.3537,
"step": 900
},
{
"epoch": 0.6151766097684638,
"grad_norm": 2.236195435477624,
"learning_rate": 9.669590514553202e-06,
"loss": 0.3706,
"step": 910
},
{
"epoch": 0.6219367922933919,
"grad_norm": 1.8500568790108252,
"learning_rate": 9.65539381427169e-06,
"loss": 0.3829,
"step": 920
},
{
"epoch": 0.6286969748183201,
"grad_norm": 1.9798512403639599,
"learning_rate": 9.640909372177923e-06,
"loss": 0.3583,
"step": 930
},
{
"epoch": 0.6354571573432483,
"grad_norm": 2.001295874575185,
"learning_rate": 9.62613808353013e-06,
"loss": 0.3846,
"step": 940
},
{
"epoch": 0.6422173398681764,
"grad_norm": 2.119068814752592,
"learning_rate": 9.611080861316029e-06,
"loss": 0.376,
"step": 950
},
{
"epoch": 0.6489775223931046,
"grad_norm": 1.874754216889785,
"learning_rate": 9.595738636196392e-06,
"loss": 0.3594,
"step": 960
},
{
"epoch": 0.6557377049180327,
"grad_norm": 1.9814730374903968,
"learning_rate": 9.580112356447528e-06,
"loss": 0.3763,
"step": 970
},
{
"epoch": 0.662497887442961,
"grad_norm": 1.6054885549770077,
"learning_rate": 9.56420298790267e-06,
"loss": 0.3539,
"step": 980
},
{
"epoch": 0.6692580699678892,
"grad_norm": 1.9862942162587933,
"learning_rate": 9.548011513892274e-06,
"loss": 0.3686,
"step": 990
},
{
"epoch": 0.6760182524928173,
"grad_norm": 1.697925964076465,
"learning_rate": 9.531538935183252e-06,
"loss": 0.3645,
"step": 1000
},
{
"epoch": 0.6760182524928173,
"eval_loss": 0.36739876866340637,
"eval_runtime": 1323.0599,
"eval_samples_per_second": 3.976,
"eval_steps_per_second": 0.497,
"step": 1000
},
{
"epoch": 0.6827784350177455,
"grad_norm": 2.258646441282196,
"learning_rate": 9.5147862699171e-06,
"loss": 0.3686,
"step": 1010
},
{
"epoch": 0.6895386175426736,
"grad_norm": 1.9875295083096067,
"learning_rate": 9.497754553546992e-06,
"loss": 0.3736,
"step": 1020
},
{
"epoch": 0.6962988000676018,
"grad_norm": 1.9408896050189433,
"learning_rate": 9.480444838773753e-06,
"loss": 0.3542,
"step": 1030
},
{
"epoch": 0.70305898259253,
"grad_norm": 2.517314352432714,
"learning_rate": 9.462858195480814e-06,
"loss": 0.3647,
"step": 1040
},
{
"epoch": 0.7098191651174581,
"grad_norm": 2.045943843085583,
"learning_rate": 9.444995710668074e-06,
"loss": 0.3797,
"step": 1050
},
{
"epoch": 0.7165793476423864,
"grad_norm": 1.9093836961309305,
"learning_rate": 9.42685848838472e-06,
"loss": 0.3413,
"step": 1060
},
{
"epoch": 0.7233395301673146,
"grad_norm": 2.022174523080341,
"learning_rate": 9.408447649660985e-06,
"loss": 0.3676,
"step": 1070
},
{
"epoch": 0.7300997126922427,
"grad_norm": 1.8275705704002196,
"learning_rate": 9.38976433243886e-06,
"loss": 0.3685,
"step": 1080
},
{
"epoch": 0.7368598952171709,
"grad_norm": 1.7772669666798888,
"learning_rate": 9.370809691501753e-06,
"loss": 0.3708,
"step": 1090
},
{
"epoch": 0.743620077742099,
"grad_norm": 1.9218973995164992,
"learning_rate": 9.351584898403129e-06,
"loss": 0.3546,
"step": 1100
},
{
"epoch": 0.7503802602670272,
"grad_norm": 1.8585271029759325,
"learning_rate": 9.332091141394082e-06,
"loss": 0.351,
"step": 1110
},
{
"epoch": 0.7571404427919554,
"grad_norm": 1.8921724238963678,
"learning_rate": 9.312329625349903e-06,
"loss": 0.3561,
"step": 1120
},
{
"epoch": 0.7639006253168835,
"grad_norm": 2.699199676621496,
"learning_rate": 9.292301571695603e-06,
"loss": 0.3621,
"step": 1130
},
{
"epoch": 0.7706608078418117,
"grad_norm": 1.7918758876099616,
"learning_rate": 9.27200821833042e-06,
"loss": 0.3481,
"step": 1140
},
{
"epoch": 0.77742099036674,
"grad_norm": 1.6841732472542632,
"learning_rate": 9.251450819551305e-06,
"loss": 0.3518,
"step": 1150
},
{
"epoch": 0.7841811728916681,
"grad_norm": 1.8263599680378415,
"learning_rate": 9.2306306459754e-06,
"loss": 0.3578,
"step": 1160
},
{
"epoch": 0.7909413554165963,
"grad_norm": 1.9440999538041503,
"learning_rate": 9.2095489844615e-06,
"loss": 0.3561,
"step": 1170
},
{
"epoch": 0.7977015379415244,
"grad_norm": 2.0375108827553725,
"learning_rate": 9.188207138030518e-06,
"loss": 0.3547,
"step": 1180
},
{
"epoch": 0.8044617204664526,
"grad_norm": 1.8656977480472188,
"learning_rate": 9.166606425784939e-06,
"loss": 0.3353,
"step": 1190
},
{
"epoch": 0.8112219029913807,
"grad_norm": 1.8340532352178343,
"learning_rate": 9.144748182827305e-06,
"loss": 0.3641,
"step": 1200
},
{
"epoch": 0.8179820855163089,
"grad_norm": 1.8770982817901785,
"learning_rate": 9.122633760177674e-06,
"loss": 0.35,
"step": 1210
},
{
"epoch": 0.8247422680412371,
"grad_norm": 1.7126903025921292,
"learning_rate": 9.10026452469013e-06,
"loss": 0.3483,
"step": 1220
},
{
"epoch": 0.8315024505661653,
"grad_norm": 1.845094017702633,
"learning_rate": 9.077641858968302e-06,
"loss": 0.3463,
"step": 1230
},
{
"epoch": 0.8382626330910935,
"grad_norm": 1.6919845905215602,
"learning_rate": 9.054767161279891e-06,
"loss": 0.3493,
"step": 1240
},
{
"epoch": 0.8450228156160217,
"grad_norm": 41.7754958887055,
"learning_rate": 9.031641845470265e-06,
"loss": 0.3691,
"step": 1250
},
{
"epoch": 0.8517829981409498,
"grad_norm": 1.6803994335239858,
"learning_rate": 9.008267340875062e-06,
"loss": 0.3404,
"step": 1260
},
{
"epoch": 0.858543180665878,
"grad_norm": 1.6747949593133225,
"learning_rate": 8.984645092231839e-06,
"loss": 0.337,
"step": 1270
},
{
"epoch": 0.8653033631908061,
"grad_norm": 1.9888542335284893,
"learning_rate": 8.960776559590794e-06,
"loss": 0.3531,
"step": 1280
},
{
"epoch": 0.8720635457157343,
"grad_norm": 1.8242235825875925,
"learning_rate": 8.936663218224504e-06,
"loss": 0.3468,
"step": 1290
},
{
"epoch": 0.8788237282406625,
"grad_norm": 1.8091464965495379,
"learning_rate": 8.91230655853675e-06,
"loss": 0.3451,
"step": 1300
},
{
"epoch": 0.8855839107655906,
"grad_norm": 1.6077172216059503,
"learning_rate": 8.887708085970395e-06,
"loss": 0.3606,
"step": 1310
},
{
"epoch": 0.8923440932905189,
"grad_norm": 1.863072270952894,
"learning_rate": 8.862869320914342e-06,
"loss": 0.3362,
"step": 1320
},
{
"epoch": 0.899104275815447,
"grad_norm": 1.6689485544997376,
"learning_rate": 8.837791798609548e-06,
"loss": 0.3556,
"step": 1330
},
{
"epoch": 0.9058644583403752,
"grad_norm": 1.7200717303760475,
"learning_rate": 8.812477069054145e-06,
"loss": 0.3478,
"step": 1340
},
{
"epoch": 0.9126246408653034,
"grad_norm": 2.137634965409493,
"learning_rate": 8.786926696907635e-06,
"loss": 0.3367,
"step": 1350
},
{
"epoch": 0.9193848233902315,
"grad_norm": 1.8766558650581284,
"learning_rate": 8.761142261394176e-06,
"loss": 0.3384,
"step": 1360
},
{
"epoch": 0.9261450059151597,
"grad_norm": 1.79644209869537,
"learning_rate": 8.735125356204982e-06,
"loss": 0.3409,
"step": 1370
},
{
"epoch": 0.9329051884400879,
"grad_norm": 1.878014748264182,
"learning_rate": 8.708877589399805e-06,
"loss": 0.3571,
"step": 1380
},
{
"epoch": 0.939665370965016,
"grad_norm": 1.6990828286145954,
"learning_rate": 8.682400583307562e-06,
"loss": 0.3434,
"step": 1390
},
{
"epoch": 0.9464255534899443,
"grad_norm": 1.7992025269775698,
"learning_rate": 8.655695974426049e-06,
"loss": 0.3469,
"step": 1400
},
{
"epoch": 0.9531857360148724,
"grad_norm": 1.9318642460759288,
"learning_rate": 8.628765413320793e-06,
"loss": 0.3567,
"step": 1410
},
{
"epoch": 0.9599459185398006,
"grad_norm": 1.7485812674142844,
"learning_rate": 8.60161056452304e-06,
"loss": 0.3295,
"step": 1420
},
{
"epoch": 0.9667061010647288,
"grad_norm": 1.9314544861372662,
"learning_rate": 8.574233106426866e-06,
"loss": 0.331,
"step": 1430
},
{
"epoch": 0.9734662835896569,
"grad_norm": 1.6993343654728899,
"learning_rate": 8.546634731185446e-06,
"loss": 0.353,
"step": 1440
},
{
"epoch": 0.9802264661145851,
"grad_norm": 1.7502543394189403,
"learning_rate": 8.518817144606451e-06,
"loss": 0.3316,
"step": 1450
},
{
"epoch": 0.9869866486395132,
"grad_norm": 1.9261600970610924,
"learning_rate": 8.490782066046634e-06,
"loss": 0.3628,
"step": 1460
},
{
"epoch": 0.9937468311644414,
"grad_norm": 1.7416309439075797,
"learning_rate": 8.462531228305546e-06,
"loss": 0.3231,
"step": 1470
},
{
"epoch": 1.0,
"grad_norm": 3.593217730508142,
"learning_rate": 8.434066377518437e-06,
"loss": 0.3394,
"step": 1480
},
{
"epoch": 1.0067601825249282,
"grad_norm": 1.6132270094363823,
"learning_rate": 8.405389273048334e-06,
"loss": 0.2478,
"step": 1490
},
{
"epoch": 1.0135203650498563,
"grad_norm": 1.6776338845512389,
"learning_rate": 8.376501687377297e-06,
"loss": 0.2299,
"step": 1500
},
{
"epoch": 1.0135203650498563,
"eval_loss": 0.351112425327301,
"eval_runtime": 1325.3556,
"eval_samples_per_second": 3.969,
"eval_steps_per_second": 0.496,
"step": 1500
}
],
"logging_steps": 10,
"max_steps": 4440,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 576417156300800.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}
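
The state above logs a training entry every 10 optimizer steps ("logging_steps": 10) and an evaluation entry every 500 steps ("eval_steps": 500), with training entries carrying "loss", "learning_rate", and "grad_norm", and evaluation entries carrying "eval_loss" and throughput fields. As a minimal sketch (not part of the repository), the following Python snippet shows one way to parse such a trainer_state.json and print the training and evaluation curves; the local file path is an assumption.

import json

# Assumed local copy of the file shown above.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training entries have a "loss" key; evaluation entries have "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

# Compact summary of the training curve (every 10th logged entry,
# i.e. every 100 optimizer steps given logging_steps = 10).
for e in train_logs[::10]:
    print(f"step {e['step']:>5}  loss {e['loss']:.4f}  "
          f"lr {e['learning_rate']:.2e}  grad_norm {e['grad_norm']:.2f}")

# Evaluation checkpoints (steps 500, 1000, 1500 in this log).
for e in eval_logs:
    print(f"eval @ step {e['step']:>5}  eval_loss {e['eval_loss']:.4f}")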