{ "best_global_step": null, "best_metric": null, "best_model_checkpoint": null, "epoch": 2.1984838042729153, "eval_steps": 500, "global_step": 600, "is_hyper_param_search": false, "is_local_process_zero": true, "is_world_process_zero": true, "log_history": [ { "epoch": 0.036756260050539856, "grad_norm": 1.0688965320587158, "learning_rate": 1.0975609756097562e-05, "loss": 1.1745, "step": 10 }, { "epoch": 0.07351252010107971, "grad_norm": 0.6050378084182739, "learning_rate": 2.3170731707317075e-05, "loss": 0.9615, "step": 20 }, { "epoch": 0.11026878015161957, "grad_norm": 0.5260648727416992, "learning_rate": 3.5365853658536584e-05, "loss": 0.9305, "step": 30 }, { "epoch": 0.14702504020215942, "grad_norm": 0.4684043824672699, "learning_rate": 4.75609756097561e-05, "loss": 0.8995, "step": 40 }, { "epoch": 0.1837813002526993, "grad_norm": 0.4516962468624115, "learning_rate": 4.998685537302135e-05, "loss": 0.8836, "step": 50 }, { "epoch": 0.22053756030323915, "grad_norm": 0.4993128776550293, "learning_rate": 4.9933479014963055e-05, "loss": 0.8635, "step": 60 }, { "epoch": 0.257293820353779, "grad_norm": 0.4929085373878479, "learning_rate": 4.9839137016353147e-05, "loss": 0.8769, "step": 70 }, { "epoch": 0.29405008040431885, "grad_norm": 0.4800896644592285, "learning_rate": 4.970398438084758e-05, "loss": 0.9013, "step": 80 }, { "epoch": 0.33080634045485874, "grad_norm": 0.42861899733543396, "learning_rate": 4.952824316387163e-05, "loss": 0.8876, "step": 90 }, { "epoch": 0.3675626005053986, "grad_norm": 0.45206961035728455, "learning_rate": 4.931220210778332e-05, "loss": 0.8549, "step": 100 }, { "epoch": 0.4043188605559384, "grad_norm": 0.7127406597137451, "learning_rate": 4.905621616747054e-05, "loss": 0.8494, "step": 110 }, { "epoch": 0.4410751206064783, "grad_norm": 0.40425172448158264, "learning_rate": 4.876070592716105e-05, "loss": 0.8437, "step": 120 }, { "epoch": 0.47783138065701813, "grad_norm": 0.4734930992126465, "learning_rate": 4.842615690940373e-05, "loss": 0.8411, "step": 130 }, { "epoch": 0.514587640707558, "grad_norm": 0.4126618802547455, "learning_rate": 4.80531187773563e-05, "loss": 0.8658, "step": 140 }, { "epoch": 0.5513439007580979, "grad_norm": 0.4084201157093048, "learning_rate": 4.7642204431690206e-05, "loss": 0.8467, "step": 150 }, { "epoch": 0.5881001608086377, "grad_norm": 0.38719961047172546, "learning_rate": 4.71940890035964e-05, "loss": 0.8834, "step": 160 }, { "epoch": 0.6248564208591776, "grad_norm": 0.37913328409194946, "learning_rate": 4.6709508745546575e-05, "loss": 0.8286, "step": 170 }, { "epoch": 0.6616126809097175, "grad_norm": 0.35573574900627136, "learning_rate": 4.618925982163232e-05, "loss": 0.8447, "step": 180 }, { "epoch": 0.6983689409602573, "grad_norm": 0.3563328683376312, "learning_rate": 4.563419699946956e-05, "loss": 0.855, "step": 190 }, { "epoch": 0.7351252010107971, "grad_norm": 0.3638900816440582, "learning_rate": 4.504523224581762e-05, "loss": 0.864, "step": 200 }, { "epoch": 0.771881461061337, "grad_norm": 0.38919857144355774, "learning_rate": 4.442333322822028e-05, "loss": 0.8375, "step": 210 }, { "epoch": 0.8086377211118768, "grad_norm": 0.3908405303955078, "learning_rate": 4.376952172513046e-05, "loss": 0.8642, "step": 220 }, { "epoch": 0.8453939811624167, "grad_norm": 0.37455061078071594, "learning_rate": 4.308487194713097e-05, "loss": 0.8775, "step": 230 }, { "epoch": 0.8821502412129566, "grad_norm": 0.3894438147544861, "learning_rate": 4.2370508772009334e-05, "loss": 0.8454, "step": 240 }, { "epoch": 0.9189065012634965, "grad_norm": 
0.40810340642929077, "learning_rate": 4.162760589658649e-05, "loss": 0.8399, "step": 250 }, { "epoch": 0.9556627613140363, "grad_norm": 0.36629530787467957, "learning_rate": 4.0857383908336076e-05, "loss": 0.8519, "step": 260 }, { "epoch": 0.9924190213645762, "grad_norm": 0.37508270144462585, "learning_rate": 4.0061108279962336e-05, "loss": 0.8629, "step": 270 }, { "epoch": 1.025729382035378, "grad_norm": 0.5247800350189209, "learning_rate": 3.924008729023185e-05, "loss": 0.7402, "step": 280 }, { "epoch": 1.0624856420859177, "grad_norm": 0.39590781927108765, "learning_rate": 3.8395669874474915e-05, "loss": 0.7421, "step": 290 }, { "epoch": 1.0992419021364577, "grad_norm": 0.3770304322242737, "learning_rate": 3.752924340828837e-05, "loss": 0.6945, "step": 300 }, { "epoch": 1.1359981621869975, "grad_norm": 0.355007529258728, "learning_rate": 3.66422314280811e-05, "loss": 0.6956, "step": 310 }, { "epoch": 1.1727544222375372, "grad_norm": 0.35693830251693726, "learning_rate": 3.573609129220748e-05, "loss": 0.6778, "step": 320 }, { "epoch": 1.2095106822880772, "grad_norm": 0.3579568862915039, "learning_rate": 3.4812311786531426e-05, "loss": 0.6869, "step": 330 }, { "epoch": 1.246266942338617, "grad_norm": 0.332889199256897, "learning_rate": 3.387241067835513e-05, "loss": 0.6995, "step": 340 }, { "epoch": 1.283023202389157, "grad_norm": 0.3603542447090149, "learning_rate": 3.2917932222731325e-05, "loss": 0.7057, "step": 350 }, { "epoch": 1.3197794624396968, "grad_norm": 0.3424987196922302, "learning_rate": 3.195044462525636e-05, "loss": 0.6823, "step": 360 }, { "epoch": 1.3565357224902366, "grad_norm": 0.349817156791687, "learning_rate": 3.097153746551248e-05, "loss": 0.7167, "step": 370 }, { "epoch": 1.3932919825407764, "grad_norm": 0.340465247631073, "learning_rate": 2.998281908539272e-05, "loss": 0.6917, "step": 380 }, { "epoch": 1.4300482425913164, "grad_norm": 0.3161555528640747, "learning_rate": 2.898591394659948e-05, "loss": 0.7037, "step": 390 }, { "epoch": 1.4668045026418561, "grad_norm": 0.3468339741230011, "learning_rate": 2.798245996165813e-05, "loss": 0.7021, "step": 400 }, { "epoch": 1.5035607626923961, "grad_norm": 0.34667858481407166, "learning_rate": 2.697410580283107e-05, "loss": 0.7188, "step": 410 }, { "epoch": 1.540317022742936, "grad_norm": 0.3385638892650604, "learning_rate": 2.5962508193353542e-05, "loss": 0.6913, "step": 420 }, { "epoch": 1.5770732827934757, "grad_norm": 0.3313932716846466, "learning_rate": 2.4949329185441666e-05, "loss": 0.696, "step": 430 }, { "epoch": 1.6138295428440155, "grad_norm": 0.335350900888443, "learning_rate": 2.3936233429545054e-05, "loss": 0.7101, "step": 440 }, { "epoch": 1.6505858028945555, "grad_norm": 0.33174756169319153, "learning_rate": 2.2924885439330477e-05, "loss": 0.6899, "step": 450 }, { "epoch": 1.6873420629450955, "grad_norm": 0.35936641693115234, "learning_rate": 2.1916946856890293e-05, "loss": 0.6858, "step": 460 }, { "epoch": 1.7240983229956353, "grad_norm": 0.351677805185318, "learning_rate": 2.0914073722668788e-05, "loss": 0.6687, "step": 470 }, { "epoch": 1.760854583046175, "grad_norm": 0.36415979266166687, "learning_rate": 1.9917913754592183e-05, "loss": 0.6877, "step": 480 }, { "epoch": 1.7976108430967148, "grad_norm": 0.31224891543388367, "learning_rate": 1.8930103640872313e-05, "loss": 0.7051, "step": 490 }, { "epoch": 1.8343671031472548, "grad_norm": 0.3152935206890106, "learning_rate": 1.795226635093226e-05, "loss": 0.6666, "step": 500 }, { "epoch": 1.8711233631977946, "grad_norm": 0.3329021632671356, 
"learning_rate": 1.6986008468871783e-05, "loss": 0.6766, "step": 510 }, { "epoch": 1.9078796232483346, "grad_norm": 0.3460406959056854, "learning_rate": 1.6032917553853936e-05, "loss": 0.6735, "step": 520 }, { "epoch": 1.9446358832988744, "grad_norm": 0.3377283215522766, "learning_rate": 1.509455953174948e-05, "loss": 0.669, "step": 530 }, { "epoch": 1.9813921433494142, "grad_norm": 0.35212215781211853, "learning_rate": 1.4172476122324806e-05, "loss": 0.6608, "step": 540 }, { "epoch": 2.014702504020216, "grad_norm": 0.4792023003101349, "learning_rate": 1.3268182306200405e-05, "loss": 0.5849, "step": 550 }, { "epoch": 2.051458764070756, "grad_norm": 0.37128546833992004, "learning_rate": 1.2383163835741692e-05, "loss": 0.4627, "step": 560 }, { "epoch": 2.0882150241212956, "grad_norm": 0.4129978120326996, "learning_rate": 1.1518874793971754e-05, "loss": 0.451, "step": 570 }, { "epoch": 2.1249712841718353, "grad_norm": 0.37535038590431213, "learning_rate": 1.0676735205516788e-05, "loss": 0.4377, "step": 580 }, { "epoch": 2.1617275442223756, "grad_norm": 0.394779771566391, "learning_rate": 9.858128703509348e-06, "loss": 0.4401, "step": 590 }, { "epoch": 2.1984838042729153, "grad_norm": 0.3875374495983124, "learning_rate": 9.064400256282757e-06, "loss": 0.4448, "step": 600 } ], "logging_steps": 10, "max_steps": 816, "num_input_tokens_seen": 0, "num_train_epochs": 3, "save_steps": 50, "stateful_callbacks": { "TrainerControl": { "args": { "should_epoch_stop": false, "should_evaluate": false, "should_log": false, "should_save": true, "should_training_stop": false }, "attributes": {} } }, "total_flos": 1.9960308772634624e+16, "train_batch_size": 1, "trial_name": null, "trial_params": null }