{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9946332737030411,
  "eval_steps": 500,
  "global_step": 2780,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0035778175313059034,
      "grad_norm": 0.5825825929641724,
      "learning_rate": 0.00019928443649373882,
      "loss": 1.248,
      "num_input_tokens_seen": 6646,
      "step": 10
    },
    {
      "epoch": 0.007155635062611807,
      "grad_norm": 0.5380188822746277,
      "learning_rate": 0.00019856887298747765,
      "loss": 0.5478,
      "num_input_tokens_seen": 13063,
      "step": 20
    },
    {
      "epoch": 0.01073345259391771,
      "grad_norm": 0.3872911036014557,
      "learning_rate": 0.00019785330948121648,
      "loss": 0.5135,
      "num_input_tokens_seen": 19512,
      "step": 30
    },
    {
      "epoch": 0.014311270125223614,
      "grad_norm": 0.4991438686847687,
      "learning_rate": 0.0001971377459749553,
      "loss": 0.5092,
      "num_input_tokens_seen": 26884,
      "step": 40
    },
    {
      "epoch": 0.017889087656529516,
      "grad_norm": 0.6744784116744995,
      "learning_rate": 0.0001964221824686941,
      "loss": 0.4799,
      "num_input_tokens_seen": 34831,
      "step": 50
    },
    {
      "epoch": 0.02146690518783542,
      "grad_norm": 0.5413841009140015,
      "learning_rate": 0.00019570661896243293,
      "loss": 0.4738,
      "num_input_tokens_seen": 40074,
      "step": 60
    },
    {
      "epoch": 0.025044722719141325,
      "grad_norm": 0.33517029881477356,
      "learning_rate": 0.00019499105545617174,
      "loss": 0.4907,
      "num_input_tokens_seen": 47194,
      "step": 70
    },
    {
      "epoch": 0.028622540250447227,
      "grad_norm": 0.34275758266448975,
      "learning_rate": 0.00019427549194991057,
      "loss": 0.4642,
      "num_input_tokens_seen": 53439,
      "step": 80
    },
    {
      "epoch": 0.03220035778175313,
      "grad_norm": 0.4074145257472992,
      "learning_rate": 0.00019355992844364938,
      "loss": 0.4431,
      "num_input_tokens_seen": 59366,
      "step": 90
    },
    {
      "epoch": 0.03577817531305903,
      "grad_norm": 0.372760534286499,
      "learning_rate": 0.0001928443649373882,
      "loss": 0.4824,
      "num_input_tokens_seen": 66414,
      "step": 100
    },
    {
      "epoch": 0.03935599284436494,
      "grad_norm": 0.35169002413749695,
      "learning_rate": 0.00019212880143112702,
      "loss": 0.4863,
      "num_input_tokens_seen": 73451,
      "step": 110
    },
    {
      "epoch": 0.04293381037567084,
      "grad_norm": 0.4088020324707031,
      "learning_rate": 0.00019141323792486585,
      "loss": 0.4792,
      "num_input_tokens_seen": 81934,
      "step": 120
    },
    {
      "epoch": 0.046511627906976744,
      "grad_norm": 0.40062326192855835,
      "learning_rate": 0.00019069767441860466,
      "loss": 0.4607,
      "num_input_tokens_seen": 88335,
      "step": 130
    },
    {
      "epoch": 0.05008944543828265,
      "grad_norm": 0.5044320225715637,
      "learning_rate": 0.0001899821109123435,
      "loss": 0.456,
      "num_input_tokens_seen": 96192,
      "step": 140
    },
    {
      "epoch": 0.05366726296958855,
      "grad_norm": 0.4566495418548584,
      "learning_rate": 0.0001892665474060823,
      "loss": 0.429,
      "num_input_tokens_seen": 101609,
      "step": 150
    },
    {
      "epoch": 0.057245080500894455,
      "grad_norm": 0.4657338559627533,
      "learning_rate": 0.0001885509838998211,
      "loss": 0.4445,
      "num_input_tokens_seen": 107467,
      "step": 160
    },
    {
      "epoch": 0.06082289803220036,
      "grad_norm": 0.5721924304962158,
      "learning_rate": 0.00018783542039355994,
      "loss": 0.4304,
      "num_input_tokens_seen": 113612,
      "step": 170
    },
    {
      "epoch": 0.06440071556350627,
      "grad_norm": 0.2883516848087311,
      "learning_rate": 0.00018711985688729877,
      "loss": 0.4525,
      "num_input_tokens_seen": 121416,
      "step": 180
    },
    {
      "epoch": 0.06797853309481217,
      "grad_norm": 0.5061659216880798,
      "learning_rate": 0.00018640429338103758,
      "loss": 0.4439,
      "num_input_tokens_seen": 128284,
      "step": 190
    },
    {
      "epoch": 0.07155635062611806,
      "grad_norm": 0.3323754072189331,
      "learning_rate": 0.00018568872987477638,
      "loss": 0.4489,
      "num_input_tokens_seen": 135571,
      "step": 200
    },
    {
      "epoch": 0.07513416815742398,
      "grad_norm": 0.5354058742523193,
      "learning_rate": 0.00018497316636851522,
      "loss": 0.4634,
      "num_input_tokens_seen": 141479,
      "step": 210
    },
    {
      "epoch": 0.07871198568872988,
      "grad_norm": 0.4328760504722595,
      "learning_rate": 0.00018425760286225405,
      "loss": 0.4545,
      "num_input_tokens_seen": 147677,
      "step": 220
    },
    {
      "epoch": 0.08228980322003578,
      "grad_norm": 0.28675127029418945,
      "learning_rate": 0.00018354203935599286,
      "loss": 0.4814,
      "num_input_tokens_seen": 154847,
      "step": 230
    },
    {
      "epoch": 0.08586762075134168,
      "grad_norm": 0.31572216749191284,
      "learning_rate": 0.00018282647584973166,
      "loss": 0.446,
      "num_input_tokens_seen": 162267,
      "step": 240
    },
    {
      "epoch": 0.08944543828264759,
      "grad_norm": 0.360166996717453,
      "learning_rate": 0.0001821109123434705,
      "loss": 0.4549,
      "num_input_tokens_seen": 168817,
      "step": 250
    },
    {
      "epoch": 0.09302325581395349,
      "grad_norm": 0.342385470867157,
      "learning_rate": 0.0001813953488372093,
      "loss": 0.4297,
      "num_input_tokens_seen": 174828,
      "step": 260
    },
    {
      "epoch": 0.09660107334525939,
      "grad_norm": 0.37481924891471863,
      "learning_rate": 0.00018067978533094814,
      "loss": 0.4314,
      "num_input_tokens_seen": 181578,
      "step": 270
    },
    {
      "epoch": 0.1001788908765653,
      "grad_norm": 0.28545519709587097,
      "learning_rate": 0.00017996422182468694,
      "loss": 0.4332,
      "num_input_tokens_seen": 187842,
      "step": 280
    },
    {
      "epoch": 0.1037567084078712,
      "grad_norm": 0.38877248764038086,
      "learning_rate": 0.00017924865831842578,
      "loss": 0.4248,
      "num_input_tokens_seen": 194651,
      "step": 290
    },
    {
      "epoch": 0.1073345259391771,
      "grad_norm": 0.30087631940841675,
      "learning_rate": 0.00017853309481216458,
      "loss": 0.4405,
      "num_input_tokens_seen": 202203,
      "step": 300
    },
    {
      "epoch": 0.11091234347048301,
      "grad_norm": 0.33470776677131653,
      "learning_rate": 0.00017781753130590342,
      "loss": 0.4485,
      "num_input_tokens_seen": 210364,
      "step": 310
    },
    {
      "epoch": 0.11449016100178891,
      "grad_norm": 0.44205668568611145,
      "learning_rate": 0.00017710196779964222,
      "loss": 0.4431,
      "num_input_tokens_seen": 217965,
      "step": 320
    },
    {
      "epoch": 0.11806797853309481,
      "grad_norm": 0.39270082116127014,
      "learning_rate": 0.00017638640429338106,
      "loss": 0.4475,
      "num_input_tokens_seen": 224249,
      "step": 330
    },
    {
      "epoch": 0.12164579606440072,
      "grad_norm": 0.37138304114341736,
      "learning_rate": 0.00017567084078711986,
      "loss": 0.4139,
      "num_input_tokens_seen": 230513,
      "step": 340
    },
    {
      "epoch": 0.1252236135957066,
      "grad_norm": 0.32019296288490295,
      "learning_rate": 0.00017495527728085867,
      "loss": 0.4488,
      "num_input_tokens_seen": 237103,
      "step": 350
    },
    {
      "epoch": 0.12880143112701253,
      "grad_norm": 0.42748796939849854,
      "learning_rate": 0.0001742397137745975,
      "loss": 0.4059,
      "num_input_tokens_seen": 242530,
      "step": 360
    },
    {
      "epoch": 0.13237924865831843,
      "grad_norm": 0.39997342228889465,
      "learning_rate": 0.00017352415026833634,
      "loss": 0.4069,
      "num_input_tokens_seen": 247524,
      "step": 370
    },
    {
      "epoch": 0.13595706618962433,
      "grad_norm": 0.45402902364730835,
      "learning_rate": 0.00017280858676207514,
      "loss": 0.4354,
      "num_input_tokens_seen": 252526,
      "step": 380
    },
    {
      "epoch": 0.13953488372093023,
      "grad_norm": 0.500701904296875,
      "learning_rate": 0.00017209302325581395,
      "loss": 0.4421,
      "num_input_tokens_seen": 258974,
      "step": 390
    },
    {
      "epoch": 0.14311270125223613,
      "grad_norm": 0.2604714035987854,
      "learning_rate": 0.00017137745974955278,
      "loss": 0.4338,
      "num_input_tokens_seen": 265411,
      "step": 400
    },
    {
      "epoch": 0.14669051878354203,
      "grad_norm": 0.5467566251754761,
      "learning_rate": 0.00017066189624329162,
      "loss": 0.4336,
      "num_input_tokens_seen": 271069,
      "step": 410
    },
    {
      "epoch": 0.15026833631484796,
      "grad_norm": 0.4278429448604584,
      "learning_rate": 0.00016994633273703042,
      "loss": 0.4384,
      "num_input_tokens_seen": 277913,
      "step": 420
    },
    {
      "epoch": 0.15384615384615385,
      "grad_norm": 0.38349634408950806,
      "learning_rate": 0.00016923076923076923,
      "loss": 0.445,
      "num_input_tokens_seen": 285726,
      "step": 430
    },
    {
      "epoch": 0.15742397137745975,
      "grad_norm": 0.298753559589386,
      "learning_rate": 0.00016851520572450806,
      "loss": 0.411,
      "num_input_tokens_seen": 293886,
      "step": 440
    },
    {
      "epoch": 0.16100178890876565,
      "grad_norm": 0.4590730667114258,
      "learning_rate": 0.00016779964221824687,
      "loss": 0.4441,
      "num_input_tokens_seen": 300933,
      "step": 450
    },
    {
      "epoch": 0.16457960644007155,
      "grad_norm": 0.25614652037620544,
      "learning_rate": 0.0001670840787119857,
      "loss": 0.4201,
      "num_input_tokens_seen": 306782,
      "step": 460
    },
    {
      "epoch": 0.16815742397137745,
      "grad_norm": 0.3007524013519287,
      "learning_rate": 0.0001663685152057245,
      "loss": 0.444,
      "num_input_tokens_seen": 314243,
      "step": 470
    },
    {
      "epoch": 0.17173524150268335,
      "grad_norm": 0.4292968809604645,
      "learning_rate": 0.00016565295169946334,
      "loss": 0.4089,
      "num_input_tokens_seen": 319432,
      "step": 480
    },
    {
      "epoch": 0.17531305903398928,
      "grad_norm": 0.31139102578163147,
      "learning_rate": 0.00016493738819320215,
      "loss": 0.4244,
      "num_input_tokens_seen": 325211,
      "step": 490
    },
    {
      "epoch": 0.17889087656529518,
      "grad_norm": 0.34276363253593445,
      "learning_rate": 0.00016422182468694098,
      "loss": 0.4133,
      "num_input_tokens_seen": 332100,
      "step": 500
    },
    {
      "epoch": 0.18246869409660108,
      "grad_norm": 0.39435234665870667,
      "learning_rate": 0.0001635062611806798,
      "loss": 0.4428,
      "num_input_tokens_seen": 337848,
      "step": 510
    },
    {
      "epoch": 0.18604651162790697,
      "grad_norm": 0.2763209640979767,
      "learning_rate": 0.00016279069767441862,
      "loss": 0.4464,
      "num_input_tokens_seen": 345759,
      "step": 520
    },
    {
      "epoch": 0.18962432915921287,
      "grad_norm": 0.2753828763961792,
      "learning_rate": 0.00016207513416815743,
      "loss": 0.438,
      "num_input_tokens_seen": 354078,
      "step": 530
    },
    {
      "epoch": 0.19320214669051877,
      "grad_norm": 0.3228139579296112,
      "learning_rate": 0.00016135957066189623,
      "loss": 0.4314,
      "num_input_tokens_seen": 360961,
      "step": 540
    },
    {
      "epoch": 0.1967799642218247,
      "grad_norm": 0.30050089955329895,
      "learning_rate": 0.00016064400715563507,
      "loss": 0.4364,
      "num_input_tokens_seen": 367083,
      "step": 550
    },
    {
      "epoch": 0.2003577817531306,
      "grad_norm": 0.3418981432914734,
      "learning_rate": 0.0001599284436493739,
      "loss": 0.4329,
      "num_input_tokens_seen": 373196,
      "step": 560
    },
    {
      "epoch": 0.2039355992844365,
      "grad_norm": 0.36333030462265015,
      "learning_rate": 0.0001592128801431127,
      "loss": 0.4482,
      "num_input_tokens_seen": 380449,
      "step": 570
    },
    {
      "epoch": 0.2075134168157424,
      "grad_norm": 0.2979726195335388,
      "learning_rate": 0.00015849731663685151,
      "loss": 0.4258,
      "num_input_tokens_seen": 386577,
      "step": 580
    },
    {
      "epoch": 0.2110912343470483,
      "grad_norm": 0.2969113886356354,
      "learning_rate": 0.00015778175313059035,
      "loss": 0.433,
      "num_input_tokens_seen": 392953,
      "step": 590
    },
    {
      "epoch": 0.2146690518783542,
      "grad_norm": 0.4132014811038971,
      "learning_rate": 0.00015706618962432918,
      "loss": 0.4148,
      "num_input_tokens_seen": 399368,
      "step": 600
    },
    {
      "epoch": 0.2182468694096601,
      "grad_norm": 0.35863760113716125,
      "learning_rate": 0.000156350626118068,
      "loss": 0.4105,
      "num_input_tokens_seen": 407343,
      "step": 610
    },
    {
      "epoch": 0.22182468694096602,
      "grad_norm": 0.287056028842926,
      "learning_rate": 0.0001556350626118068,
      "loss": 0.4495,
      "num_input_tokens_seen": 413867,
      "step": 620
    },
    {
      "epoch": 0.22540250447227192,
      "grad_norm": 0.41710999608039856,
      "learning_rate": 0.00015491949910554563,
      "loss": 0.426,
      "num_input_tokens_seen": 422712,
      "step": 630
    },
    {
      "epoch": 0.22898032200357782,
      "grad_norm": 0.42847341299057007,
      "learning_rate": 0.00015420393559928446,
      "loss": 0.4163,
      "num_input_tokens_seen": 428523,
      "step": 640
    },
    {
      "epoch": 0.23255813953488372,
      "grad_norm": 0.35523882508277893,
      "learning_rate": 0.00015348837209302327,
      "loss": 0.4237,
      "num_input_tokens_seen": 436283,
      "step": 650
    },
    {
      "epoch": 0.23613595706618962,
      "grad_norm": 0.32238948345184326,
      "learning_rate": 0.00015277280858676207,
      "loss": 0.4527,
      "num_input_tokens_seen": 444368,
      "step": 660
    },
    {
      "epoch": 0.23971377459749552,
      "grad_norm": 0.3075689375400543,
      "learning_rate": 0.0001520572450805009,
      "loss": 0.4057,
      "num_input_tokens_seen": 449919,
      "step": 670
    },
    {
      "epoch": 0.24329159212880144,
      "grad_norm": 0.3819845914840698,
      "learning_rate": 0.00015134168157423971,
      "loss": 0.3951,
      "num_input_tokens_seen": 455612,
      "step": 680
    },
    {
      "epoch": 0.24686940966010734,
      "grad_norm": 0.3687816560268402,
      "learning_rate": 0.00015062611806797855,
      "loss": 0.4176,
      "num_input_tokens_seen": 461846,
      "step": 690
    },
    {
      "epoch": 0.2504472271914132,
      "grad_norm": 0.32070156931877136,
      "learning_rate": 0.00014991055456171735,
      "loss": 0.4111,
      "num_input_tokens_seen": 468333,
      "step": 700
    },
    {
      "epoch": 0.25402504472271914,
      "grad_norm": 0.35491397976875305,
      "learning_rate": 0.0001491949910554562,
      "loss": 0.4215,
      "num_input_tokens_seen": 476220,
      "step": 710
    },
    {
      "epoch": 0.25760286225402507,
      "grad_norm": 0.3560994863510132,
      "learning_rate": 0.000148479427549195,
      "loss": 0.4011,
      "num_input_tokens_seen": 481867,
      "step": 720
    },
    {
      "epoch": 0.26118067978533094,
      "grad_norm": 0.3188275992870331,
      "learning_rate": 0.0001477638640429338,
      "loss": 0.4372,
      "num_input_tokens_seen": 489088,
      "step": 730
    },
    {
      "epoch": 0.26475849731663686,
      "grad_norm": 0.3422275483608246,
      "learning_rate": 0.00014704830053667263,
      "loss": 0.4178,
      "num_input_tokens_seen": 494747,
      "step": 740
    },
    {
      "epoch": 0.26833631484794274,
      "grad_norm": 0.29682761430740356,
      "learning_rate": 0.00014633273703041147,
      "loss": 0.4309,
      "num_input_tokens_seen": 501076,
      "step": 750
    },
    {
      "epoch": 0.27191413237924866,
      "grad_norm": 0.44356122612953186,
      "learning_rate": 0.00014561717352415027,
      "loss": 0.4113,
      "num_input_tokens_seen": 508236,
      "step": 760
    },
    {
      "epoch": 0.27549194991055453,
      "grad_norm": 0.4110952317714691,
      "learning_rate": 0.00014490161001788908,
      "loss": 0.4232,
      "num_input_tokens_seen": 514982,
      "step": 770
    },
    {
      "epoch": 0.27906976744186046,
      "grad_norm": 0.3558269441127777,
      "learning_rate": 0.00014418604651162791,
      "loss": 0.4076,
      "num_input_tokens_seen": 521248,
      "step": 780
    },
    {
      "epoch": 0.2826475849731664,
      "grad_norm": 0.3435330092906952,
      "learning_rate": 0.00014347048300536675,
      "loss": 0.4109,
      "num_input_tokens_seen": 527954,
      "step": 790
    },
    {
      "epoch": 0.28622540250447226,
      "grad_norm": 0.2566317021846771,
      "learning_rate": 0.00014275491949910555,
      "loss": 0.4144,
      "num_input_tokens_seen": 535308,
      "step": 800
    },
    {
      "epoch": 0.2898032200357782,
      "grad_norm": 0.24462071061134338,
      "learning_rate": 0.00014203935599284436,
      "loss": 0.3984,
      "num_input_tokens_seen": 542297,
      "step": 810
    },
    {
      "epoch": 0.29338103756708406,
      "grad_norm": 0.4308456480503082,
      "learning_rate": 0.0001413237924865832,
      "loss": 0.4142,
      "num_input_tokens_seen": 548183,
      "step": 820
    },
    {
      "epoch": 0.29695885509839,
      "grad_norm": 0.4515162408351898,
      "learning_rate": 0.00014060822898032203,
      "loss": 0.404,
      "num_input_tokens_seen": 553861,
      "step": 830
    },
    {
      "epoch": 0.3005366726296959,
      "grad_norm": 0.3883764147758484,
      "learning_rate": 0.00013989266547406083,
      "loss": 0.4159,
      "num_input_tokens_seen": 559943,
      "step": 840
    },
    {
      "epoch": 0.3041144901610018,
      "grad_norm": 0.2981751263141632,
      "learning_rate": 0.00013917710196779964,
      "loss": 0.4099,
      "num_input_tokens_seen": 566038,
      "step": 850
    },
    {
      "epoch": 0.3076923076923077,
      "grad_norm": 0.42181873321533203,
      "learning_rate": 0.00013846153846153847,
      "loss": 0.4365,
      "num_input_tokens_seen": 574126,
      "step": 860
    },
    {
      "epoch": 0.3112701252236136,
      "grad_norm": 0.31020838022232056,
      "learning_rate": 0.00013774597495527728,
      "loss": 0.4076,
      "num_input_tokens_seen": 581039,
      "step": 870
    },
    {
      "epoch": 0.3148479427549195,
      "grad_norm": 0.2839311361312866,
      "learning_rate": 0.00013703041144901611,
      "loss": 0.4139,
      "num_input_tokens_seen": 587384,
      "step": 880
    },
    {
      "epoch": 0.3184257602862254,
      "grad_norm": 0.30183765292167664,
      "learning_rate": 0.00013631484794275492,
      "loss": 0.4412,
      "num_input_tokens_seen": 594626,
      "step": 890
    },
    {
      "epoch": 0.3220035778175313,
      "grad_norm": 0.24405309557914734,
      "learning_rate": 0.00013559928443649375,
      "loss": 0.4217,
      "num_input_tokens_seen": 601425,
      "step": 900
    },
    {
      "epoch": 0.32558139534883723,
      "grad_norm": 0.3325401544570923,
      "learning_rate": 0.00013488372093023256,
      "loss": 0.399,
      "num_input_tokens_seen": 607033,
      "step": 910
    },
    {
      "epoch": 0.3291592128801431,
      "grad_norm": 0.4284108877182007,
      "learning_rate": 0.00013416815742397137,
      "loss": 0.4329,
      "num_input_tokens_seen": 612959,
      "step": 920
    },
    {
      "epoch": 0.33273703041144903,
      "grad_norm": 0.39697644114494324,
      "learning_rate": 0.0001334525939177102,
      "loss": 0.4062,
      "num_input_tokens_seen": 620181,
      "step": 930
    },
    {
      "epoch": 0.3363148479427549,
      "grad_norm": 0.4615568220615387,
      "learning_rate": 0.00013273703041144903,
      "loss": 0.3929,
      "num_input_tokens_seen": 625029,
      "step": 940
    },
    {
      "epoch": 0.33989266547406083,
      "grad_norm": 0.3364561200141907,
      "learning_rate": 0.00013202146690518784,
      "loss": 0.4217,
      "num_input_tokens_seen": 630571,
      "step": 950
    },
    {
      "epoch": 0.3434704830053667,
      "grad_norm": 0.3077852129936218,
      "learning_rate": 0.00013130590339892665,
      "loss": 0.4223,
      "num_input_tokens_seen": 636343,
      "step": 960
    },
    {
      "epoch": 0.3470483005366726,
      "grad_norm": 0.4563674032688141,
      "learning_rate": 0.00013059033989266548,
      "loss": 0.4093,
      "num_input_tokens_seen": 644448,
      "step": 970
    },
    {
      "epoch": 0.35062611806797855,
      "grad_norm": 0.3391217291355133,
      "learning_rate": 0.00012987477638640431,
      "loss": 0.4146,
      "num_input_tokens_seen": 651211,
      "step": 980
    },
    {
      "epoch": 0.3542039355992844,
      "grad_norm": 0.2930082082748413,
      "learning_rate": 0.00012915921288014312,
      "loss": 0.4248,
      "num_input_tokens_seen": 658984,
      "step": 990
    },
    {
      "epoch": 0.35778175313059035,
      "grad_norm": 0.3553549647331238,
      "learning_rate": 0.00012844364937388193,
      "loss": 0.417,
      "num_input_tokens_seen": 666184,
      "step": 1000
    },
    {
      "epoch": 0.3613595706618962,
      "grad_norm": 0.3744185268878937,
      "learning_rate": 0.00012772808586762076,
      "loss": 0.3988,
      "num_input_tokens_seen": 673125,
      "step": 1010
    },
    {
      "epoch": 0.36493738819320215,
      "grad_norm": 0.4000609219074249,
      "learning_rate": 0.0001270125223613596,
      "loss": 0.4301,
      "num_input_tokens_seen": 680863,
      "step": 1020
    },
    {
      "epoch": 0.3685152057245081,
      "grad_norm": 0.27482128143310547,
      "learning_rate": 0.0001262969588550984,
      "loss": 0.4122,
      "num_input_tokens_seen": 688582,
      "step": 1030
    },
    {
      "epoch": 0.37209302325581395,
      "grad_norm": 0.438516765832901,
      "learning_rate": 0.0001255813953488372,
      "loss": 0.4253,
      "num_input_tokens_seen": 695760,
      "step": 1040
    },
    {
      "epoch": 0.3756708407871199,
      "grad_norm": 0.26929447054862976,
      "learning_rate": 0.00012486583184257604,
      "loss": 0.405,
      "num_input_tokens_seen": 702561,
      "step": 1050
    },
    {
      "epoch": 0.37924865831842575,
      "grad_norm": 0.24823708832263947,
      "learning_rate": 0.00012415026833631485,
      "loss": 0.4317,
      "num_input_tokens_seen": 710659,
      "step": 1060
    },
    {
      "epoch": 0.3828264758497317,
      "grad_norm": 0.31499966979026794,
      "learning_rate": 0.00012343470483005368,
      "loss": 0.4357,
      "num_input_tokens_seen": 717932,
      "step": 1070
    },
    {
      "epoch": 0.38640429338103754,
      "grad_norm": 0.4409402012825012,
      "learning_rate": 0.0001227191413237925,
      "loss": 0.408,
      "num_input_tokens_seen": 724716,
      "step": 1080
    },
    {
      "epoch": 0.38998211091234347,
      "grad_norm": 0.35407742857933044,
      "learning_rate": 0.00012200357781753131,
      "loss": 0.4088,
      "num_input_tokens_seen": 730694,
      "step": 1090
    },
    {
      "epoch": 0.3935599284436494,
      "grad_norm": 0.36748751997947693,
      "learning_rate": 0.00012128801431127013,
      "loss": 0.4208,
      "num_input_tokens_seen": 739112,
      "step": 1100
    },
    {
      "epoch": 0.39713774597495527,
      "grad_norm": 0.3873242437839508,
      "learning_rate": 0.00012057245080500895,
      "loss": 0.3798,
      "num_input_tokens_seen": 744478,
      "step": 1110
    },
    {
      "epoch": 0.4007155635062612,
      "grad_norm": 0.31334424018859863,
      "learning_rate": 0.00011985688729874778,
      "loss": 0.4179,
      "num_input_tokens_seen": 753315,
      "step": 1120
    },
    {
      "epoch": 0.40429338103756707,
      "grad_norm": 0.42388609051704407,
      "learning_rate": 0.00011914132379248659,
      "loss": 0.3882,
      "num_input_tokens_seen": 759832,
      "step": 1130
    },
    {
      "epoch": 0.407871198568873,
      "grad_norm": 0.3108317255973816,
      "learning_rate": 0.00011842576028622541,
      "loss": 0.4247,
      "num_input_tokens_seen": 766054,
      "step": 1140
    },
    {
      "epoch": 0.41144901610017887,
      "grad_norm": 0.322694331407547,
      "learning_rate": 0.00011771019677996423,
      "loss": 0.416,
      "num_input_tokens_seen": 772192,
      "step": 1150
    },
    {
      "epoch": 0.4150268336314848,
      "grad_norm": 0.4780764877796173,
      "learning_rate": 0.00011699463327370303,
      "loss": 0.3937,
      "num_input_tokens_seen": 778619,
      "step": 1160
    },
    {
      "epoch": 0.4186046511627907,
      "grad_norm": 0.24456287920475006,
      "learning_rate": 0.00011627906976744187,
      "loss": 0.4264,
      "num_input_tokens_seen": 787085,
      "step": 1170
    },
    {
      "epoch": 0.4221824686940966,
      "grad_norm": 0.2968457341194153,
      "learning_rate": 0.00011556350626118069,
      "loss": 0.4196,
      "num_input_tokens_seen": 794103,
      "step": 1180
    },
    {
      "epoch": 0.4257602862254025,
      "grad_norm": 0.3390592038631439,
      "learning_rate": 0.00011484794275491951,
      "loss": 0.4073,
      "num_input_tokens_seen": 800033,
      "step": 1190
    },
    {
      "epoch": 0.4293381037567084,
      "grad_norm": 0.35398080945014954,
      "learning_rate": 0.00011413237924865831,
      "loss": 0.4032,
      "num_input_tokens_seen": 807607,
      "step": 1200
    },
    {
      "epoch": 0.4329159212880143,
      "grad_norm": 0.25886887311935425,
      "learning_rate": 0.00011341681574239715,
      "loss": 0.416,
      "num_input_tokens_seen": 813956,
      "step": 1210
    },
    {
      "epoch": 0.4364937388193202,
      "grad_norm": 0.336000919342041,
      "learning_rate": 0.00011270125223613597,
      "loss": 0.3817,
      "num_input_tokens_seen": 821166,
      "step": 1220
    },
    {
      "epoch": 0.4400715563506261,
      "grad_norm": 0.36095812916755676,
      "learning_rate": 0.00011198568872987479,
      "loss": 0.4097,
      "num_input_tokens_seen": 829461,
      "step": 1230
    },
    {
      "epoch": 0.44364937388193204,
      "grad_norm": 0.34367650747299194,
      "learning_rate": 0.0001112701252236136,
      "loss": 0.4028,
      "num_input_tokens_seen": 835022,
      "step": 1240
    },
    {
      "epoch": 0.4472271914132379,
      "grad_norm": 0.3959554135799408,
      "learning_rate": 0.00011055456171735241,
      "loss": 0.402,
      "num_input_tokens_seen": 841676,
      "step": 1250
    },
    {
      "epoch": 0.45080500894454384,
      "grad_norm": 0.4300963580608368,
      "learning_rate": 0.00010983899821109125,
      "loss": 0.4169,
      "num_input_tokens_seen": 848238,
      "step": 1260
    },
    {
      "epoch": 0.4543828264758497,
      "grad_norm": 0.36439695954322815,
      "learning_rate": 0.00010912343470483007,
      "loss": 0.4131,
      "num_input_tokens_seen": 854482,
      "step": 1270
    },
    {
      "epoch": 0.45796064400715564,
      "grad_norm": 0.4523600935935974,
      "learning_rate": 0.00010840787119856887,
      "loss": 0.3947,
      "num_input_tokens_seen": 860605,
      "step": 1280
    },
    {
      "epoch": 0.46153846153846156,
      "grad_norm": 0.43886885046958923,
      "learning_rate": 0.0001076923076923077,
      "loss": 0.4053,
      "num_input_tokens_seen": 867693,
      "step": 1290
    },
    {
      "epoch": 0.46511627906976744,
      "grad_norm": 0.33564773201942444,
      "learning_rate": 0.00010697674418604651,
      "loss": 0.3984,
      "num_input_tokens_seen": 873855,
      "step": 1300
    },
    {
      "epoch": 0.46869409660107336,
      "grad_norm": 0.31822851300239563,
      "learning_rate": 0.00010626118067978535,
      "loss": 0.4162,
      "num_input_tokens_seen": 881261,
      "step": 1310
    },
    {
      "epoch": 0.47227191413237923,
      "grad_norm": 0.2756568193435669,
      "learning_rate": 0.00010554561717352415,
      "loss": 0.4092,
      "num_input_tokens_seen": 888281,
      "step": 1320
    },
    {
      "epoch": 0.47584973166368516,
      "grad_norm": 0.31455397605895996,
      "learning_rate": 0.00010483005366726297,
      "loss": 0.3943,
      "num_input_tokens_seen": 895446,
      "step": 1330
    },
    {
      "epoch": 0.47942754919499103,
      "grad_norm": 0.25797346234321594,
      "learning_rate": 0.0001041144901610018,
      "loss": 0.4353,
      "num_input_tokens_seen": 902631,
      "step": 1340
    },
    {
      "epoch": 0.48300536672629696,
      "grad_norm": 0.2746681571006775,
      "learning_rate": 0.0001033989266547406,
      "loss": 0.3863,
      "num_input_tokens_seen": 908599,
      "step": 1350
    },
    {
      "epoch": 0.4865831842576029,
      "grad_norm": 0.3950134813785553,
      "learning_rate": 0.00010268336314847943,
      "loss": 0.4049,
      "num_input_tokens_seen": 915272,
      "step": 1360
    },
    {
      "epoch": 0.49016100178890876,
      "grad_norm": 0.249292254447937,
      "learning_rate": 0.00010196779964221825,
      "loss": 0.4064,
      "num_input_tokens_seen": 921729,
      "step": 1370
    },
    {
      "epoch": 0.4937388193202147,
      "grad_norm": 0.2855275869369507,
      "learning_rate": 0.00010125223613595707,
      "loss": 0.4092,
      "num_input_tokens_seen": 928645,
      "step": 1380
    },
    {
      "epoch": 0.49731663685152055,
      "grad_norm": 0.3198321759700775,
      "learning_rate": 0.00010053667262969588,
      "loss": 0.3814,
      "num_input_tokens_seen": 933801,
      "step": 1390
    },
    {
      "epoch": 0.5008944543828264,
      "grad_norm": 0.3592207133769989,
      "learning_rate": 9.982110912343471e-05,
      "loss": 0.3776,
      "num_input_tokens_seen": 938905,
      "step": 1400
    },
    {
      "epoch": 0.5044722719141324,
      "grad_norm": 0.2043834775686264,
      "learning_rate": 9.910554561717353e-05,
      "loss": 0.4206,
      "num_input_tokens_seen": 947665,
      "step": 1410
    },
    {
      "epoch": 0.5080500894454383,
      "grad_norm": 0.3463163673877716,
      "learning_rate": 9.838998211091235e-05,
      "loss": 0.3953,
      "num_input_tokens_seen": 955261,
      "step": 1420
    },
    {
      "epoch": 0.5116279069767442,
      "grad_norm": 0.3044353723526001,
      "learning_rate": 9.767441860465116e-05,
      "loss": 0.4226,
      "num_input_tokens_seen": 961797,
      "step": 1430
    },
    {
      "epoch": 0.5152057245080501,
      "grad_norm": 0.4201965928077698,
      "learning_rate": 9.695885509838999e-05,
      "loss": 0.4031,
      "num_input_tokens_seen": 967800,
      "step": 1440
    },
    {
      "epoch": 0.518783542039356,
      "grad_norm": 0.4523704946041107,
      "learning_rate": 9.62432915921288e-05,
      "loss": 0.3753,
      "num_input_tokens_seen": 973223,
      "step": 1450
    },
    {
      "epoch": 0.5223613595706619,
      "grad_norm": 0.3966517448425293,
      "learning_rate": 9.552772808586763e-05,
      "loss": 0.4212,
      "num_input_tokens_seen": 981582,
      "step": 1460
    },
    {
      "epoch": 0.5259391771019678,
      "grad_norm": 0.3248649835586548,
      "learning_rate": 9.481216457960644e-05,
      "loss": 0.4052,
      "num_input_tokens_seen": 988838,
      "step": 1470
    },
    {
      "epoch": 0.5295169946332737,
      "grad_norm": 0.23017796874046326,
      "learning_rate": 9.409660107334526e-05,
      "loss": 0.4019,
      "num_input_tokens_seen": 995054,
      "step": 1480
    },
    {
      "epoch": 0.5330948121645797,
      "grad_norm": 0.3645225465297699,
      "learning_rate": 9.338103756708408e-05,
      "loss": 0.4038,
      "num_input_tokens_seen": 1001348,
      "step": 1490
    },
    {
      "epoch": 0.5366726296958855,
      "grad_norm": 0.25708308815956116,
      "learning_rate": 9.26654740608229e-05,
      "loss": 0.4016,
      "num_input_tokens_seen": 1007269,
      "step": 1500
    },
    {
      "epoch": 0.5402504472271914,
      "grad_norm": 0.2978523373603821,
      "learning_rate": 9.194991055456172e-05,
      "loss": 0.4019,
      "num_input_tokens_seen": 1014734,
      "step": 1510
    },
    {
      "epoch": 0.5438282647584973,
      "grad_norm": 0.32369324564933777,
      "learning_rate": 9.123434704830054e-05,
      "loss": 0.3867,
      "num_input_tokens_seen": 1022130,
      "step": 1520
    },
    {
      "epoch": 0.5474060822898033,
      "grad_norm": 0.34458237886428833,
      "learning_rate": 9.051878354203936e-05,
      "loss": 0.4247,
      "num_input_tokens_seen": 1029090,
      "step": 1530
    },
    {
      "epoch": 0.5509838998211091,
      "grad_norm": 0.34909483790397644,
      "learning_rate": 8.980322003577818e-05,
      "loss": 0.4164,
      "num_input_tokens_seen": 1035252,
      "step": 1540
    },
    {
      "epoch": 0.554561717352415,
      "grad_norm": 0.27858543395996094,
      "learning_rate": 8.9087656529517e-05,
      "loss": 0.3976,
      "num_input_tokens_seen": 1041187,
      "step": 1550
    },
    {
      "epoch": 0.5581395348837209,
      "grad_norm": 0.34940874576568604,
      "learning_rate": 8.837209302325582e-05,
      "loss": 0.3889,
      "num_input_tokens_seen": 1047255,
      "step": 1560
    },
    {
      "epoch": 0.5617173524150268,
      "grad_norm": 0.3687174916267395,
      "learning_rate": 8.765652951699464e-05,
      "loss": 0.3794,
      "num_input_tokens_seen": 1054159,
      "step": 1570
    },
    {
      "epoch": 0.5652951699463328,
      "grad_norm": 0.3228056728839874,
      "learning_rate": 8.694096601073346e-05,
      "loss": 0.4095,
      "num_input_tokens_seen": 1060584,
      "step": 1580
    },
    {
      "epoch": 0.5688729874776386,
      "grad_norm": 0.41065359115600586,
      "learning_rate": 8.622540250447228e-05,
      "loss": 0.3741,
      "num_input_tokens_seen": 1066384,
      "step": 1590
    },
    {
      "epoch": 0.5724508050089445,
      "grad_norm": 0.3346722722053528,
      "learning_rate": 8.55098389982111e-05,
      "loss": 0.4068,
      "num_input_tokens_seen": 1072956,
      "step": 1600
    },
    {
      "epoch": 0.5760286225402504,
      "grad_norm": 0.4220598042011261,
      "learning_rate": 8.479427549194992e-05,
      "loss": 0.4272,
      "num_input_tokens_seen": 1079895,
      "step": 1610
    },
    {
      "epoch": 0.5796064400715564,
      "grad_norm": 0.2898394763469696,
      "learning_rate": 8.407871198568873e-05,
      "loss": 0.4022,
      "num_input_tokens_seen": 1087604,
      "step": 1620
    },
    {
      "epoch": 0.5831842576028623,
      "grad_norm": 0.36473724246025085,
      "learning_rate": 8.336314847942756e-05,
      "loss": 0.3857,
      "num_input_tokens_seen": 1094532,
      "step": 1630
    },
    {
      "epoch": 0.5867620751341681,
      "grad_norm": 0.29369673132896423,
      "learning_rate": 8.264758497316637e-05,
      "loss": 0.405,
      "num_input_tokens_seen": 1100973,
      "step": 1640
    },
    {
      "epoch": 0.590339892665474,
      "grad_norm": 0.29171282052993774,
      "learning_rate": 8.19320214669052e-05,
      "loss": 0.4103,
      "num_input_tokens_seen": 1107805,
      "step": 1650
    },
    {
      "epoch": 0.59391771019678,
      "grad_norm": 0.24154767394065857,
      "learning_rate": 8.1216457960644e-05,
      "loss": 0.3903,
      "num_input_tokens_seen": 1115112,
      "step": 1660
    },
    {
      "epoch": 0.5974955277280859,
      "grad_norm": 0.39852896332740784,
      "learning_rate": 8.050089445438284e-05,
      "loss": 0.4128,
      "num_input_tokens_seen": 1121451,
      "step": 1670
    },
    {
      "epoch": 0.6010733452593918,
      "grad_norm": 0.37065914273262024,
      "learning_rate": 7.978533094812165e-05,
      "loss": 0.4015,
      "num_input_tokens_seen": 1129374,
      "step": 1680
    },
    {
      "epoch": 0.6046511627906976,
      "grad_norm": 0.25734543800354004,
      "learning_rate": 7.906976744186047e-05,
      "loss": 0.3982,
      "num_input_tokens_seen": 1136582,
      "step": 1690
    },
    {
      "epoch": 0.6082289803220036,
      "grad_norm": 0.263864666223526,
      "learning_rate": 7.835420393559929e-05,
      "loss": 0.414,
      "num_input_tokens_seen": 1144688,
      "step": 1700
    },
    {
      "epoch": 0.6118067978533095,
      "grad_norm": 0.4987502098083496,
      "learning_rate": 7.76386404293381e-05,
      "loss": 0.4002,
      "num_input_tokens_seen": 1151513,
      "step": 1710
    },
    {
      "epoch": 0.6153846153846154,
      "grad_norm": 0.35515296459198,
      "learning_rate": 7.692307692307693e-05,
      "loss": 0.399,
      "num_input_tokens_seen": 1159983,
      "step": 1720
    },
    {
      "epoch": 0.6189624329159212,
      "grad_norm": 0.2761338949203491,
      "learning_rate": 7.620751341681575e-05,
      "loss": 0.3954,
      "num_input_tokens_seen": 1168489,
      "step": 1730
    },
    {
      "epoch": 0.6225402504472272,
      "grad_norm": 0.4277167022228241,
      "learning_rate": 7.549194991055457e-05,
      "loss": 0.4031,
      "num_input_tokens_seen": 1175665,
      "step": 1740
    },
    {
      "epoch": 0.6261180679785331,
      "grad_norm": 0.33866697549819946,
      "learning_rate": 7.477638640429339e-05,
      "loss": 0.412,
      "num_input_tokens_seen": 1181930,
      "step": 1750
    },
    {
      "epoch": 0.629695885509839,
      "grad_norm": 0.2600657045841217,
      "learning_rate": 7.40608228980322e-05,
      "loss": 0.3975,
      "num_input_tokens_seen": 1189185,
      "step": 1760
    },
    {
      "epoch": 0.6332737030411449,
      "grad_norm": 0.262329638004303,
      "learning_rate": 7.334525939177103e-05,
      "loss": 0.3841,
      "num_input_tokens_seen": 1196883,
      "step": 1770
    },
    {
      "epoch": 0.6368515205724508,
      "grad_norm": 0.35897818207740784,
      "learning_rate": 7.262969588550985e-05,
      "loss": 0.4004,
      "num_input_tokens_seen": 1205284,
      "step": 1780
    },
    {
      "epoch": 0.6404293381037567,
      "grad_norm": 0.3555593192577362,
      "learning_rate": 7.191413237924867e-05,
      "loss": 0.386,
      "num_input_tokens_seen": 1210701,
      "step": 1790
    },
    {
      "epoch": 0.6440071556350626,
      "grad_norm": 0.3081437349319458,
      "learning_rate": 7.119856887298749e-05,
      "loss": 0.3885,
      "num_input_tokens_seen": 1217841,
      "step": 1800
    },
    {
      "epoch": 0.6475849731663685,
      "grad_norm": 0.43780773878097534,
      "learning_rate": 7.048300536672629e-05,
      "loss": 0.3893,
      "num_input_tokens_seen": 1225095,
      "step": 1810
    },
    {
      "epoch": 0.6511627906976745,
      "grad_norm": 0.381865918636322,
      "learning_rate": 6.976744186046513e-05,
      "loss": 0.3884,
      "num_input_tokens_seen": 1234033,
      "step": 1820
    },
    {
      "epoch": 0.6547406082289803,
      "grad_norm": 0.4573935866355896,
      "learning_rate": 6.905187835420393e-05,
      "loss": 0.4199,
      "num_input_tokens_seen": 1241707,
      "step": 1830
    },
    {
      "epoch": 0.6583184257602862,
      "grad_norm": 0.2445441335439682,
      "learning_rate": 6.833631484794277e-05,
      "loss": 0.3934,
      "num_input_tokens_seen": 1248683,
      "step": 1840
    },
    {
      "epoch": 0.6618962432915921,
      "grad_norm": 0.36599206924438477,
      "learning_rate": 6.762075134168157e-05,
      "loss": 0.3758,
      "num_input_tokens_seen": 1254021,
      "step": 1850
    },
    {
      "epoch": 0.6654740608228981,
      "grad_norm": 0.29867666959762573,
      "learning_rate": 6.69051878354204e-05,
      "loss": 0.3846,
      "num_input_tokens_seen": 1260977,
      "step": 1860
    },
    {
      "epoch": 0.669051878354204,
      "grad_norm": 0.3520377576351166,
      "learning_rate": 6.618962432915921e-05,
      "loss": 0.406,
      "num_input_tokens_seen": 1268015,
      "step": 1870
    },
    {
      "epoch": 0.6726296958855098,
      "grad_norm": 0.48654794692993164,
      "learning_rate": 6.547406082289803e-05,
      "loss": 0.3948,
      "num_input_tokens_seen": 1273256,
      "step": 1880
    },
    {
      "epoch": 0.6762075134168157,
      "grad_norm": 0.3770650327205658,
      "learning_rate": 6.475849731663685e-05,
      "loss": 0.3991,
      "num_input_tokens_seen": 1280311,
      "step": 1890
    },
    {
      "epoch": 0.6797853309481217,
      "grad_norm": 0.3090347945690155,
      "learning_rate": 6.404293381037567e-05,
      "loss": 0.3835,
      "num_input_tokens_seen": 1286733,
      "step": 1900
    },
    {
      "epoch": 0.6833631484794276,
      "grad_norm": 0.299207478761673,
      "learning_rate": 6.332737030411449e-05,
      "loss": 0.3905,
      "num_input_tokens_seen": 1293454,
      "step": 1910
    },
    {
      "epoch": 0.6869409660107334,
      "grad_norm": 0.48610031604766846,
      "learning_rate": 6.261180679785331e-05,
      "loss": 0.374,
      "num_input_tokens_seen": 1299015,
      "step": 1920
    },
    {
      "epoch": 0.6905187835420393,
      "grad_norm": 0.3136894404888153,
      "learning_rate": 6.189624329159213e-05,
      "loss": 0.3952,
      "num_input_tokens_seen": 1305291,
      "step": 1930
    },
    {
      "epoch": 0.6940966010733453,
      "grad_norm": 0.3310141861438751,
      "learning_rate": 6.118067978533095e-05,
      "loss": 0.3968,
      "num_input_tokens_seen": 1312434,
      "step": 1940
    },
    {
      "epoch": 0.6976744186046512,
      "grad_norm": 0.27954959869384766,
      "learning_rate": 6.0465116279069765e-05,
      "loss": 0.4071,
      "num_input_tokens_seen": 1320352,
      "step": 1950
    },
    {
      "epoch": 0.7012522361359571,
      "grad_norm": 0.36177489161491394,
      "learning_rate": 5.974955277280859e-05,
      "loss": 0.3683,
      "num_input_tokens_seen": 1326200,
      "step": 1960
    },
    {
      "epoch": 0.7048300536672629,
      "grad_norm": 0.3898317217826843,
      "learning_rate": 5.9033989266547405e-05,
      "loss": 0.4181,
      "num_input_tokens_seen": 1333316,
      "step": 1970
    },
    {
      "epoch": 0.7084078711985689,
      "grad_norm": 0.2924859821796417,
      "learning_rate": 5.831842576028623e-05,
      "loss": 0.3828,
      "num_input_tokens_seen": 1339225,
      "step": 1980
    },
    {
      "epoch": 0.7119856887298748,
      "grad_norm": 0.3090393543243408,
      "learning_rate": 5.7602862254025045e-05,
      "loss": 0.3932,
      "num_input_tokens_seen": 1346104,
      "step": 1990
    },
    {
      "epoch": 0.7155635062611807,
      "grad_norm": 0.4194253981113434,
      "learning_rate": 5.6887298747763865e-05,
      "loss": 0.4041,
      "num_input_tokens_seen": 1352944,
      "step": 2000
    },
    {
      "epoch": 0.7191413237924866,
      "grad_norm": 0.3060110807418823,
      "learning_rate": 5.6171735241502685e-05,
      "loss": 0.3943,
      "num_input_tokens_seen": 1359671,
      "step": 2010
    },
    {
      "epoch": 0.7227191413237924,
      "grad_norm": 0.42584308981895447,
      "learning_rate": 5.5456171735241505e-05,
      "loss": 0.3982,
      "num_input_tokens_seen": 1366487,
      "step": 2020
    },
    {
      "epoch": 0.7262969588550984,
      "grad_norm": 0.2706156373023987,
      "learning_rate": 5.4740608228980325e-05,
      "loss": 0.3715,
      "num_input_tokens_seen": 1374437,
      "step": 2030
    },
    {
      "epoch": 0.7298747763864043,
      "grad_norm": 0.27796509861946106,
      "learning_rate": 5.4025044722719145e-05,
      "loss": 0.3981,
      "num_input_tokens_seen": 1382038,
      "step": 2040
    },
    {
      "epoch": 0.7334525939177102,
      "grad_norm": 0.37042665481567383,
      "learning_rate": 5.3309481216457965e-05,
      "loss": 0.3949,
      "num_input_tokens_seen": 1388519,
      "step": 2050
    },
    {
      "epoch": 0.7370304114490162,
      "grad_norm": 0.3565654754638672,
      "learning_rate": 5.2593917710196785e-05,
      "loss": 0.3869,
      "num_input_tokens_seen": 1395702,
      "step": 2060
    },
    {
      "epoch": 0.740608228980322,
      "grad_norm": 0.2900319993495941,
      "learning_rate": 5.18783542039356e-05,
      "loss": 0.3963,
      "num_input_tokens_seen": 1402155,
      "step": 2070
    },
    {
      "epoch": 0.7441860465116279,
      "grad_norm": 0.27532869577407837,
      "learning_rate": 5.1162790697674425e-05,
      "loss": 0.3961,
      "num_input_tokens_seen": 1409417,
      "step": 2080
    },
    {
      "epoch": 0.7477638640429338,
      "grad_norm": 0.3994668424129486,
      "learning_rate": 5.044722719141324e-05,
      "loss": 0.417,
      "num_input_tokens_seen": 1416404,
      "step": 2090
    },
    {
      "epoch": 0.7513416815742398,
      "grad_norm": 0.372256338596344,
      "learning_rate": 4.973166368515206e-05,
      "loss": 0.3864,
      "num_input_tokens_seen": 1423004,
      "step": 2100
    },
    {
      "epoch": 0.7549194991055456,
      "grad_norm": 0.32723429799079895,
      "learning_rate": 4.901610017889088e-05,
      "loss": 0.3924,
      "num_input_tokens_seen": 1429991,
      "step": 2110
    },
    {
      "epoch": 0.7584973166368515,
      "grad_norm": 0.37468647956848145,
      "learning_rate": 4.83005366726297e-05,
      "loss": 0.3879,
      "num_input_tokens_seen": 1437151,
      "step": 2120
    },
    {
      "epoch": 0.7620751341681574,
      "grad_norm": 0.2736769914627075,
      "learning_rate": 4.758497316636852e-05,
      "loss": 0.3994,
      "num_input_tokens_seen": 1443666,
      "step": 2130
    },
    {
      "epoch": 0.7656529516994633,
      "grad_norm": 0.46681609749794006,
      "learning_rate": 4.686940966010734e-05,
      "loss": 0.3754,
      "num_input_tokens_seen": 1449724,
      "step": 2140
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 0.32450494170188904,
      "learning_rate": 4.615384615384616e-05,
      "loss": 0.3712,
      "num_input_tokens_seen": 1455151,
      "step": 2150
    },
    {
      "epoch": 0.7728085867620751,
      "grad_norm": 0.36587849259376526,
      "learning_rate": 4.543828264758497e-05,
      "loss": 0.399,
      "num_input_tokens_seen": 1461336,
      "step": 2160
    },
    {
      "epoch": 0.776386404293381,
      "grad_norm": 0.2556416392326355,
      "learning_rate": 4.472271914132379e-05,
      "loss": 0.3925,
      "num_input_tokens_seen": 1468427,
      "step": 2170
    },
    {
      "epoch": 0.7799642218246869,
      "grad_norm": 0.38359931111335754,
      "learning_rate": 4.400715563506261e-05,
      "loss": 0.3971,
      "num_input_tokens_seen": 1475721,
      "step": 2180
    },
    {
      "epoch": 0.7835420393559929,
      "grad_norm": 0.2836684584617615,
      "learning_rate": 4.329159212880143e-05,
      "loss": 0.3814,
      "num_input_tokens_seen": 1481691,
      "step": 2190
    },
    {
      "epoch": 0.7871198568872988,
      "grad_norm": 0.3486209809780121,
      "learning_rate": 4.257602862254025e-05,
      "loss": 0.4195,
      "num_input_tokens_seen": 1488275,
      "step": 2200
    },
    {
      "epoch": 0.7906976744186046,
      "grad_norm": 0.31744202971458435,
      "learning_rate": 4.186046511627907e-05,
      "loss": 0.3672,
      "num_input_tokens_seen": 1496031,
      "step": 2210
    },
    {
      "epoch": 0.7942754919499105,
      "grad_norm": 0.3008958697319031,
      "learning_rate": 4.114490161001789e-05,
      "loss": 0.3809,
      "num_input_tokens_seen": 1503138,
      "step": 2220
    },
    {
      "epoch": 0.7978533094812165,
      "grad_norm": 0.39187124371528625,
      "learning_rate": 4.042933810375671e-05,
      "loss": 0.3779,
      "num_input_tokens_seen": 1509279,
      "step": 2230
    },
    {
      "epoch": 0.8014311270125224,
      "grad_norm": 0.3644355833530426,
      "learning_rate": 3.971377459749553e-05,
      "loss": 0.3883,
      "num_input_tokens_seen": 1515223,
      "step": 2240
    },
    {
      "epoch": 0.8050089445438283,
      "grad_norm": 0.30987629294395447,
      "learning_rate": 3.899821109123435e-05,
      "loss": 0.3868,
      "num_input_tokens_seen": 1523801,
      "step": 2250
    },
    {
      "epoch": 0.8085867620751341,
      "grad_norm": 0.2742662727832794,
      "learning_rate": 3.828264758497317e-05,
      "loss": 0.369,
      "num_input_tokens_seen": 1530483,
      "step": 2260
    },
    {
      "epoch": 0.8121645796064401,
      "grad_norm": 0.31918245553970337,
      "learning_rate": 3.756708407871199e-05,
      "loss": 0.4161,
      "num_input_tokens_seen": 1538556,
      "step": 2270
    },
    {
      "epoch": 0.815742397137746,
      "grad_norm": 0.36853113770484924,
      "learning_rate": 3.685152057245081e-05,
      "loss": 0.3834,
      "num_input_tokens_seen": 1545040,
      "step": 2280
    },
    {
      "epoch": 0.8193202146690519,
      "grad_norm": 0.5155879855155945,
      "learning_rate": 3.6135957066189624e-05,
      "loss": 0.3814,
      "num_input_tokens_seen": 1552028,
      "step": 2290
    },
    {
      "epoch": 0.8228980322003577,
      "grad_norm": 0.2956937551498413,
      "learning_rate": 3.5420393559928444e-05,
      "loss": 0.395,
      "num_input_tokens_seen": 1559137,
      "step": 2300
    },
    {
      "epoch": 0.8264758497316637,
      "grad_norm": 0.25360018014907837,
      "learning_rate": 3.4704830053667264e-05,
      "loss": 0.3743,
      "num_input_tokens_seen": 1565661,
      "step": 2310
    },
    {
      "epoch": 0.8300536672629696,
      "grad_norm": 0.27616095542907715,
      "learning_rate": 3.3989266547406084e-05,
      "loss": 0.3971,
      "num_input_tokens_seen": 1572070,
      "step": 2320
    },
    {
      "epoch": 0.8336314847942755,
      "grad_norm": 0.3011910021305084,
      "learning_rate": 3.3273703041144904e-05,
      "loss": 0.4008,
      "num_input_tokens_seen": 1579014,
      "step": 2330
    },
    {
      "epoch": 0.8372093023255814,
      "grad_norm": 0.3195567727088928,
      "learning_rate": 3.2558139534883724e-05,
      "loss": 0.362,
      "num_input_tokens_seen": 1586863,
      "step": 2340
    },
    {
      "epoch": 0.8407871198568873,
      "grad_norm": 0.3128429353237152,
      "learning_rate": 3.184257602862254e-05,
      "loss": 0.373,
      "num_input_tokens_seen": 1593227,
      "step": 2350
    },
    {
      "epoch": 0.8443649373881932,
      "grad_norm": 0.3150210678577423,
      "learning_rate": 3.112701252236136e-05,
      "loss": 0.3722,
      "num_input_tokens_seen": 1601444,
      "step": 2360
    },
    {
      "epoch": 0.8479427549194991,
      "grad_norm": 0.44683215022087097,
      "learning_rate": 3.041144901610018e-05,
      "loss": 0.4009,
      "num_input_tokens_seen": 1607535,
      "step": 2370
    },
    {
      "epoch": 0.851520572450805,
      "grad_norm": 0.3482876121997833,
      "learning_rate": 2.9695885509839e-05,
      "loss": 0.3792,
      "num_input_tokens_seen": 1615204,
      "step": 2380
    },
    {
      "epoch": 0.855098389982111,
      "grad_norm": 0.29884225130081177,
      "learning_rate": 2.898032200357782e-05,
      "loss": 0.4037,
      "num_input_tokens_seen": 1621948,
      "step": 2390
    },
    {
      "epoch": 0.8586762075134168,
      "grad_norm": 0.36023974418640137,
      "learning_rate": 2.826475849731664e-05,
      "loss": 0.3994,
      "num_input_tokens_seen": 1628983,
      "step": 2400
    },
    {
      "epoch": 0.8622540250447227,
      "grad_norm": 0.4629397988319397,
      "learning_rate": 2.7549194991055454e-05,
      "loss": 0.3868,
      "num_input_tokens_seen": 1635950,
      "step": 2410
    },
    {
      "epoch": 0.8658318425760286,
      "grad_norm": 0.4515867829322815,
      "learning_rate": 2.6833631484794274e-05,
      "loss": 0.3816,
      "num_input_tokens_seen": 1643015,
      "step": 2420
    },
    {
      "epoch": 0.8694096601073346,
      "grad_norm": 0.33169665932655334,
      "learning_rate": 2.6118067978533094e-05,
      "loss": 0.3707,
      "num_input_tokens_seen": 1648061,
      "step": 2430
    },
    {
      "epoch": 0.8729874776386404,
      "grad_norm": 0.37958571314811707,
      "learning_rate": 2.5402504472271914e-05,
      "loss": 0.3905,
      "num_input_tokens_seen": 1655672,
      "step": 2440
    },
    {
      "epoch": 0.8765652951699463,
      "grad_norm": 0.5768277645111084,
      "learning_rate": 2.4686940966010734e-05,
      "loss": 0.3941,
      "num_input_tokens_seen": 1662028,
      "step": 2450
    },
    {
      "epoch": 0.8801431127012522,
      "grad_norm": 0.4279649257659912,
      "learning_rate": 2.3971377459749554e-05,
      "loss": 0.3874,
      "num_input_tokens_seen": 1668160,
      "step": 2460
    },
    {
      "epoch": 0.8837209302325582,
      "grad_norm": 0.30337658524513245,
      "learning_rate": 2.3255813953488374e-05,
      "loss": 0.3951,
      "num_input_tokens_seen": 1675594,
      "step": 2470
    },
    {
      "epoch": 0.8872987477638641,
      "grad_norm": 0.2993133068084717,
      "learning_rate": 2.2540250447227194e-05,
      "loss": 0.3572,
      "num_input_tokens_seen": 1682014,
      "step": 2480
    },
    {
      "epoch": 0.8908765652951699,
      "grad_norm": 0.4232490062713623,
      "learning_rate": 2.182468694096601e-05,
      "loss": 0.3664,
      "num_input_tokens_seen": 1687943,
      "step": 2490
    },
    {
      "epoch": 0.8944543828264758,
      "grad_norm": 0.4080519676208496,
      "learning_rate": 2.110912343470483e-05,
      "loss": 0.391,
      "num_input_tokens_seen": 1694486,
      "step": 2500
    },
    {
      "epoch": 0.8980322003577818,
      "grad_norm": 0.2778749167919159,
      "learning_rate": 2.039355992844365e-05,
      "loss": 0.3755,
      "num_input_tokens_seen": 1701728,
      "step": 2510
    },
    {
      "epoch": 0.9016100178890877,
      "grad_norm": 0.45739054679870605,
      "learning_rate": 1.967799642218247e-05,
      "loss": 0.3857,
      "num_input_tokens_seen": 1707744,
      "step": 2520
    },
    {
      "epoch": 0.9051878354203936,
      "grad_norm": 0.3340027332305908,
      "learning_rate": 1.8962432915921287e-05,
      "loss": 0.3598,
      "num_input_tokens_seen": 1713524,
      "step": 2530
    },
    {
      "epoch": 0.9087656529516994,
      "grad_norm": 0.29041382670402527,
      "learning_rate": 1.8246869409660107e-05,
      "loss": 0.3874,
      "num_input_tokens_seen": 1720278,
      "step": 2540
    },
    {
      "epoch": 0.9123434704830053,
      "grad_norm": 0.23165664076805115,
      "learning_rate": 1.7531305903398927e-05,
      "loss": 0.4029,
      "num_input_tokens_seen": 1729448,
      "step": 2550
    },
    {
      "epoch": 0.9159212880143113,
      "grad_norm": 0.41043820977211,
      "learning_rate": 1.6815742397137747e-05,
      "loss": 0.3661,
      "num_input_tokens_seen": 1735842,
      "step": 2560
    },
    {
      "epoch": 0.9194991055456172,
      "grad_norm": 0.25310322642326355,
      "learning_rate": 1.6100178890876567e-05,
      "loss": 0.3926,
      "num_input_tokens_seen": 1742027,
      "step": 2570
    },
    {
      "epoch": 0.9230769230769231,
      "grad_norm": 0.32354310154914856,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 0.3949,
      "num_input_tokens_seen": 1748817,
      "step": 2580
    },
    {
      "epoch": 0.9266547406082289,
      "grad_norm": 0.31920483708381653,
      "learning_rate": 1.4669051878354204e-05,
      "loss": 0.3741,
      "num_input_tokens_seen": 1754777,
      "step": 2590
    },
    {
      "epoch": 0.9302325581395349,
      "grad_norm": 0.26824864745140076,
      "learning_rate": 1.3953488372093024e-05,
      "loss": 0.386,
      "num_input_tokens_seen": 1762187,
      "step": 2600
    },
    {
      "epoch": 0.9338103756708408,
      "grad_norm": 0.26331019401550293,
      "learning_rate": 1.3237924865831844e-05,
      "loss": 0.3666,
      "num_input_tokens_seen": 1769463,
      "step": 2610
    },
    {
      "epoch": 0.9373881932021467,
      "grad_norm": 0.3325729966163635,
      "learning_rate": 1.2522361359570662e-05,
      "loss": 0.4048,
      "num_input_tokens_seen": 1777203,
      "step": 2620
    },
    {
      "epoch": 0.9409660107334525,
      "grad_norm": 0.43130627274513245,
      "learning_rate": 1.1806797853309482e-05,
      "loss": 0.3858,
      "num_input_tokens_seen": 1783302,
      "step": 2630
    },
    {
      "epoch": 0.9445438282647585,
      "grad_norm": 0.3261716365814209,
      "learning_rate": 1.1091234347048302e-05,
      "loss": 0.3877,
      "num_input_tokens_seen": 1790778,
      "step": 2640
    },
    {
      "epoch": 0.9481216457960644,
      "grad_norm": 0.3405713140964508,
      "learning_rate": 1.037567084078712e-05,
      "loss": 0.3674,
      "num_input_tokens_seen": 1797992,
      "step": 2650
    },
    {
      "epoch": 0.9516994633273703,
      "grad_norm": 0.4525390565395355,
      "learning_rate": 9.660107334525939e-06,
      "loss": 0.376,
      "num_input_tokens_seen": 1804337,
      "step": 2660
    },
    {
      "epoch": 0.9552772808586762,
      "grad_norm": 0.32699060440063477,
      "learning_rate": 8.944543828264759e-06,
      "loss": 0.4,
      "num_input_tokens_seen": 1811353,
      "step": 2670
    },
    {
      "epoch": 0.9588550983899821,
      "grad_norm": 0.37904760241508484,
      "learning_rate": 8.228980322003579e-06,
      "loss": 0.396,
      "num_input_tokens_seen": 1817606,
      "step": 2680
    },
    {
      "epoch": 0.962432915921288,
      "grad_norm": 0.401185005903244,
      "learning_rate": 7.513416815742398e-06,
      "loss": 0.4044,
      "num_input_tokens_seen": 1826557,
      "step": 2690
    },
    {
      "epoch": 0.9660107334525939,
      "grad_norm": 0.3426940441131592,
      "learning_rate": 6.797853309481217e-06,
      "loss": 0.3881,
      "num_input_tokens_seen": 1832731,
      "step": 2700
    },
    {
      "epoch": 0.9695885509838998,
      "grad_norm": 0.36823177337646484,
      "learning_rate": 6.082289803220036e-06,
      "loss": 0.3836,
      "num_input_tokens_seen": 1838814,
      "step": 2710
    },
    {
      "epoch": 0.9731663685152058,
      "grad_norm": 0.40080201625823975,
      "learning_rate": 5.366726296958855e-06,
      "loss": 0.3393,
      "num_input_tokens_seen": 1844328,
      "step": 2720
    },
    {
      "epoch": 0.9767441860465116,
      "grad_norm": 0.3002487123012543,
      "learning_rate": 4.651162790697674e-06,
      "loss": 0.358,
      "num_input_tokens_seen": 1850313,
      "step": 2730
    },
    {
      "epoch": 0.9803220035778175,
      "grad_norm": 0.29295697808265686,
      "learning_rate": 3.935599284436494e-06,
      "loss": 0.3986,
      "num_input_tokens_seen": 1857019,
      "step": 2740
    },
    {
      "epoch": 0.9838998211091234,
      "grad_norm": 0.33198845386505127,
      "learning_rate": 3.2200357781753134e-06,
      "loss": 0.3699,
      "num_input_tokens_seen": 1865080,
      "step": 2750
    },
    {
      "epoch": 0.9874776386404294,
      "grad_norm": 0.3775390386581421,
      "learning_rate": 2.5044722719141326e-06,
      "loss": 0.3659,
      "num_input_tokens_seen": 1870697,
      "step": 2760
    },
    {
      "epoch": 0.9910554561717353,
      "grad_norm": 0.2900574803352356,
      "learning_rate": 1.7889087656529517e-06,
      "loss": 0.3616,
      "num_input_tokens_seen": 1876560,
      "step": 2770
    },
    {
      "epoch": 0.9946332737030411,
      "grad_norm": 0.3781711459159851,
      "learning_rate": 1.073345259391771e-06,
      "loss": 0.3501,
      "num_input_tokens_seen": 1882565,
      "step": 2780
    }
  ],
  "logging_steps": 10,
  "max_steps": 2795,
  "num_input_tokens_seen": 1882565,
  "num_train_epochs": 1,
  "save_steps": 20,
  "total_flos": 4.233223877501952e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}
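For context, this is the trainer_state.json checkpoint state written by the Hugging Face transformers Trainer: each log_history entry is one logging event (every 10 steps here, per logging_steps), the learning rate decays linearly from 2e-4 toward 0 over max_steps = 2795, and the training loss falls from 1.248 at step 10 to about 0.35 by step 2780. A minimal sketch of how such a log can be inspected, assuming Python with matplotlib installed and the file saved as trainer_state.json (both assumptions for illustration, not part of the log itself):

import json

import matplotlib.pyplot as plt  # assumed to be installed

# Load the Trainer checkpoint state (the filename is an assumption for illustration).
with open("trainer_state.json") as f:
    state = json.load(f)

# Keep only entries that carry a training loss (eval entries, if any, would lack it).
train_logs = [e for e in state["log_history"] if "loss" in e]
steps = [e["step"] for e in train_logs]
losses = [e["loss"] for e in train_logs]

print(f"{len(train_logs)} logged points, final loss {losses[-1]:.4f} "
      f"at step {steps[-1]} (epoch {state['epoch']:.3f})")

# Plot the loss curve against the global step.
plt.plot(steps, losses)
plt.xlabel("global step")
plt.ylabel("training loss")
plt.show()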