{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 1005,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.029850746268656716,
"grad_norm": 3.2636772208309153,
"learning_rate": 5e-06,
"loss": 0.7976,
"step": 10
},
{
"epoch": 0.05970149253731343,
"grad_norm": 4.198083473691167,
"learning_rate": 5e-06,
"loss": 0.7201,
"step": 20
},
{
"epoch": 0.08955223880597014,
"grad_norm": 0.976830869365462,
"learning_rate": 5e-06,
"loss": 0.7097,
"step": 30
},
{
"epoch": 0.11940298507462686,
"grad_norm": 1.0243806020799167,
"learning_rate": 5e-06,
"loss": 0.687,
"step": 40
},
{
"epoch": 0.14925373134328357,
"grad_norm": 1.0277760094413817,
"learning_rate": 5e-06,
"loss": 0.6709,
"step": 50
},
{
"epoch": 0.1791044776119403,
"grad_norm": 0.8438731934295206,
"learning_rate": 5e-06,
"loss": 0.6661,
"step": 60
},
{
"epoch": 0.208955223880597,
"grad_norm": 0.7599111846557922,
"learning_rate": 5e-06,
"loss": 0.661,
"step": 70
},
{
"epoch": 0.23880597014925373,
"grad_norm": 0.6979506430052831,
"learning_rate": 5e-06,
"loss": 0.6481,
"step": 80
},
{
"epoch": 0.26865671641791045,
"grad_norm": 0.6344985723138951,
"learning_rate": 5e-06,
"loss": 0.6494,
"step": 90
},
{
"epoch": 0.29850746268656714,
"grad_norm": 0.9496025763872573,
"learning_rate": 5e-06,
"loss": 0.6351,
"step": 100
},
{
"epoch": 0.3283582089552239,
"grad_norm": 0.5640696000070055,
"learning_rate": 5e-06,
"loss": 0.6292,
"step": 110
},
{
"epoch": 0.3582089552238806,
"grad_norm": 0.6244500377098101,
"learning_rate": 5e-06,
"loss": 0.6246,
"step": 120
},
{
"epoch": 0.3880597014925373,
"grad_norm": 0.705148467511176,
"learning_rate": 5e-06,
"loss": 0.6288,
"step": 130
},
{
"epoch": 0.417910447761194,
"grad_norm": 0.5782292155064868,
"learning_rate": 5e-06,
"loss": 0.6208,
"step": 140
},
{
"epoch": 0.44776119402985076,
"grad_norm": 0.47879907688098855,
"learning_rate": 5e-06,
"loss": 0.6307,
"step": 150
},
{
"epoch": 0.47761194029850745,
"grad_norm": 0.641221897829538,
"learning_rate": 5e-06,
"loss": 0.6282,
"step": 160
},
{
"epoch": 0.5074626865671642,
"grad_norm": 0.5790307980225682,
"learning_rate": 5e-06,
"loss": 0.6263,
"step": 170
},
{
"epoch": 0.5373134328358209,
"grad_norm": 0.5896556501658362,
"learning_rate": 5e-06,
"loss": 0.6171,
"step": 180
},
{
"epoch": 0.5671641791044776,
"grad_norm": 0.6678853160055497,
"learning_rate": 5e-06,
"loss": 0.6217,
"step": 190
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.4932224279411186,
"learning_rate": 5e-06,
"loss": 0.612,
"step": 200
},
{
"epoch": 0.6268656716417911,
"grad_norm": 0.59016513490961,
"learning_rate": 5e-06,
"loss": 0.6207,
"step": 210
},
{
"epoch": 0.6567164179104478,
"grad_norm": 0.6628507303786519,
"learning_rate": 5e-06,
"loss": 0.6205,
"step": 220
},
{
"epoch": 0.6865671641791045,
"grad_norm": 0.5728090788820762,
"learning_rate": 5e-06,
"loss": 0.6198,
"step": 230
},
{
"epoch": 0.7164179104477612,
"grad_norm": 0.5909920699712947,
"learning_rate": 5e-06,
"loss": 0.6195,
"step": 240
},
{
"epoch": 0.746268656716418,
"grad_norm": 0.5403630353274208,
"learning_rate": 5e-06,
"loss": 0.616,
"step": 250
},
{
"epoch": 0.7761194029850746,
"grad_norm": 0.4389359227878181,
"learning_rate": 5e-06,
"loss": 0.615,
"step": 260
},
{
"epoch": 0.8059701492537313,
"grad_norm": 0.5071059932681349,
"learning_rate": 5e-06,
"loss": 0.6119,
"step": 270
},
{
"epoch": 0.835820895522388,
"grad_norm": 0.5232710694651,
"learning_rate": 5e-06,
"loss": 0.6052,
"step": 280
},
{
"epoch": 0.8656716417910447,
"grad_norm": 0.6304632419292111,
"learning_rate": 5e-06,
"loss": 0.6107,
"step": 290
},
{
"epoch": 0.8955223880597015,
"grad_norm": 0.4513885510016045,
"learning_rate": 5e-06,
"loss": 0.6029,
"step": 300
},
{
"epoch": 0.9253731343283582,
"grad_norm": 0.46627673450975804,
"learning_rate": 5e-06,
"loss": 0.6224,
"step": 310
},
{
"epoch": 0.9552238805970149,
"grad_norm": 0.4541172691354782,
"learning_rate": 5e-06,
"loss": 0.6005,
"step": 320
},
{
"epoch": 0.9850746268656716,
"grad_norm": 0.5886900494680195,
"learning_rate": 5e-06,
"loss": 0.6102,
"step": 330
},
{
"epoch": 1.0,
"eval_loss": 0.6099968552589417,
"eval_runtime": 181.0403,
"eval_samples_per_second": 49.84,
"eval_steps_per_second": 0.392,
"step": 335
},
{
"epoch": 1.0149253731343284,
"grad_norm": 0.7366116445067696,
"learning_rate": 5e-06,
"loss": 0.5859,
"step": 340
},
{
"epoch": 1.044776119402985,
"grad_norm": 0.571210239937034,
"learning_rate": 5e-06,
"loss": 0.56,
"step": 350
},
{
"epoch": 1.0746268656716418,
"grad_norm": 0.4668816015405423,
"learning_rate": 5e-06,
"loss": 0.5592,
"step": 360
},
{
"epoch": 1.1044776119402986,
"grad_norm": 0.5628631007519552,
"learning_rate": 5e-06,
"loss": 0.5657,
"step": 370
},
{
"epoch": 1.1343283582089552,
"grad_norm": 0.5929997737908812,
"learning_rate": 5e-06,
"loss": 0.5563,
"step": 380
},
{
"epoch": 1.164179104477612,
"grad_norm": 0.5658505485409301,
"learning_rate": 5e-06,
"loss": 0.5578,
"step": 390
},
{
"epoch": 1.1940298507462686,
"grad_norm": 0.7526049702677613,
"learning_rate": 5e-06,
"loss": 0.5615,
"step": 400
},
{
"epoch": 1.2238805970149254,
"grad_norm": 0.5393989822533652,
"learning_rate": 5e-06,
"loss": 0.5673,
"step": 410
},
{
"epoch": 1.2537313432835822,
"grad_norm": 0.5325263654739846,
"learning_rate": 5e-06,
"loss": 0.5638,
"step": 420
},
{
"epoch": 1.2835820895522387,
"grad_norm": 0.5064885711552297,
"learning_rate": 5e-06,
"loss": 0.5607,
"step": 430
},
{
"epoch": 1.3134328358208955,
"grad_norm": 0.5684568858118794,
"learning_rate": 5e-06,
"loss": 0.5576,
"step": 440
},
{
"epoch": 1.3432835820895521,
"grad_norm": 0.5283953048651826,
"learning_rate": 5e-06,
"loss": 0.5561,
"step": 450
},
{
"epoch": 1.373134328358209,
"grad_norm": 0.46950929831714766,
"learning_rate": 5e-06,
"loss": 0.5602,
"step": 460
},
{
"epoch": 1.4029850746268657,
"grad_norm": 0.5385565397537747,
"learning_rate": 5e-06,
"loss": 0.5581,
"step": 470
},
{
"epoch": 1.4328358208955223,
"grad_norm": 0.5659871694460666,
"learning_rate": 5e-06,
"loss": 0.5617,
"step": 480
},
{
"epoch": 1.462686567164179,
"grad_norm": 0.546965476854921,
"learning_rate": 5e-06,
"loss": 0.5553,
"step": 490
},
{
"epoch": 1.4925373134328357,
"grad_norm": 0.497538374377084,
"learning_rate": 5e-06,
"loss": 0.5602,
"step": 500
},
{
"epoch": 1.5223880597014925,
"grad_norm": 0.5312343228790736,
"learning_rate": 5e-06,
"loss": 0.5618,
"step": 510
},
{
"epoch": 1.5522388059701493,
"grad_norm": 0.5166406423583252,
"learning_rate": 5e-06,
"loss": 0.5591,
"step": 520
},
{
"epoch": 1.582089552238806,
"grad_norm": 0.5255413189150826,
"learning_rate": 5e-06,
"loss": 0.5604,
"step": 530
},
{
"epoch": 1.6119402985074627,
"grad_norm": 0.49551565255287583,
"learning_rate": 5e-06,
"loss": 0.5618,
"step": 540
},
{
"epoch": 1.6417910447761193,
"grad_norm": 0.49267014615726396,
"learning_rate": 5e-06,
"loss": 0.5585,
"step": 550
},
{
"epoch": 1.671641791044776,
"grad_norm": 0.5643970187910693,
"learning_rate": 5e-06,
"loss": 0.5652,
"step": 560
},
{
"epoch": 1.7014925373134329,
"grad_norm": 0.5037470018499742,
"learning_rate": 5e-06,
"loss": 0.5625,
"step": 570
},
{
"epoch": 1.7313432835820897,
"grad_norm": 0.48141660593931324,
"learning_rate": 5e-06,
"loss": 0.5568,
"step": 580
},
{
"epoch": 1.7611940298507462,
"grad_norm": 0.5300295332006951,
"learning_rate": 5e-06,
"loss": 0.5597,
"step": 590
},
{
"epoch": 1.7910447761194028,
"grad_norm": 0.5373472168427755,
"learning_rate": 5e-06,
"loss": 0.5562,
"step": 600
},
{
"epoch": 1.8208955223880596,
"grad_norm": 0.5707503080671126,
"learning_rate": 5e-06,
"loss": 0.5527,
"step": 610
},
{
"epoch": 1.8507462686567164,
"grad_norm": 0.5730054222686448,
"learning_rate": 5e-06,
"loss": 0.5665,
"step": 620
},
{
"epoch": 1.8805970149253732,
"grad_norm": 0.5072679291608492,
"learning_rate": 5e-06,
"loss": 0.5513,
"step": 630
},
{
"epoch": 1.9104477611940298,
"grad_norm": 0.5178695559305173,
"learning_rate": 5e-06,
"loss": 0.5583,
"step": 640
},
{
"epoch": 1.9402985074626866,
"grad_norm": 0.5656735937545936,
"learning_rate": 5e-06,
"loss": 0.563,
"step": 650
},
{
"epoch": 1.9701492537313432,
"grad_norm": 0.4863140367653273,
"learning_rate": 5e-06,
"loss": 0.5595,
"step": 660
},
{
"epoch": 2.0,
"grad_norm": 0.5328164667475485,
"learning_rate": 5e-06,
"loss": 0.553,
"step": 670
},
{
"epoch": 2.0,
"eval_loss": 0.6020517349243164,
"eval_runtime": 181.5231,
"eval_samples_per_second": 49.707,
"eval_steps_per_second": 0.391,
"step": 670
},
{
"epoch": 2.029850746268657,
"grad_norm": 0.6224628520404539,
"learning_rate": 5e-06,
"loss": 0.5057,
"step": 680
},
{
"epoch": 2.0597014925373136,
"grad_norm": 0.5148413250898433,
"learning_rate": 5e-06,
"loss": 0.5083,
"step": 690
},
{
"epoch": 2.08955223880597,
"grad_norm": 0.5438872711269419,
"learning_rate": 5e-06,
"loss": 0.504,
"step": 700
},
{
"epoch": 2.1194029850746268,
"grad_norm": 0.7136622301499072,
"learning_rate": 5e-06,
"loss": 0.5084,
"step": 710
},
{
"epoch": 2.1492537313432836,
"grad_norm": 0.565521915151828,
"learning_rate": 5e-06,
"loss": 0.5141,
"step": 720
},
{
"epoch": 2.1791044776119404,
"grad_norm": 0.5319158435460484,
"learning_rate": 5e-06,
"loss": 0.5107,
"step": 730
},
{
"epoch": 2.208955223880597,
"grad_norm": 0.5565738587496012,
"learning_rate": 5e-06,
"loss": 0.5096,
"step": 740
},
{
"epoch": 2.2388059701492535,
"grad_norm": 0.4910069126256564,
"learning_rate": 5e-06,
"loss": 0.5154,
"step": 750
},
{
"epoch": 2.2686567164179103,
"grad_norm": 0.48169824804919137,
"learning_rate": 5e-06,
"loss": 0.515,
"step": 760
},
{
"epoch": 2.298507462686567,
"grad_norm": 0.5716888321310263,
"learning_rate": 5e-06,
"loss": 0.5059,
"step": 770
},
{
"epoch": 2.328358208955224,
"grad_norm": 0.5310388379236972,
"learning_rate": 5e-06,
"loss": 0.5065,
"step": 780
},
{
"epoch": 2.3582089552238807,
"grad_norm": 0.5340384268973276,
"learning_rate": 5e-06,
"loss": 0.5097,
"step": 790
},
{
"epoch": 2.388059701492537,
"grad_norm": 0.5612588485250506,
"learning_rate": 5e-06,
"loss": 0.5092,
"step": 800
},
{
"epoch": 2.417910447761194,
"grad_norm": 0.5517492107037403,
"learning_rate": 5e-06,
"loss": 0.5155,
"step": 810
},
{
"epoch": 2.4477611940298507,
"grad_norm": 0.5868418861130762,
"learning_rate": 5e-06,
"loss": 0.5099,
"step": 820
},
{
"epoch": 2.4776119402985075,
"grad_norm": 0.5335016774082386,
"learning_rate": 5e-06,
"loss": 0.4953,
"step": 830
},
{
"epoch": 2.5074626865671643,
"grad_norm": 0.47262015142721125,
"learning_rate": 5e-06,
"loss": 0.508,
"step": 840
},
{
"epoch": 2.5373134328358207,
"grad_norm": 0.4852270938780388,
"learning_rate": 5e-06,
"loss": 0.5125,
"step": 850
},
{
"epoch": 2.5671641791044775,
"grad_norm": 0.5802563395388763,
"learning_rate": 5e-06,
"loss": 0.5095,
"step": 860
},
{
"epoch": 2.5970149253731343,
"grad_norm": 0.503360426921311,
"learning_rate": 5e-06,
"loss": 0.5189,
"step": 870
},
{
"epoch": 2.626865671641791,
"grad_norm": 0.49515721551631564,
"learning_rate": 5e-06,
"loss": 0.5158,
"step": 880
},
{
"epoch": 2.656716417910448,
"grad_norm": 0.48219119082060224,
"learning_rate": 5e-06,
"loss": 0.509,
"step": 890
},
{
"epoch": 2.6865671641791042,
"grad_norm": 0.4921666373084123,
"learning_rate": 5e-06,
"loss": 0.5099,
"step": 900
},
{
"epoch": 2.716417910447761,
"grad_norm": 0.547644485149717,
"learning_rate": 5e-06,
"loss": 0.5056,
"step": 910
},
{
"epoch": 2.746268656716418,
"grad_norm": 0.5561203783171008,
"learning_rate": 5e-06,
"loss": 0.5105,
"step": 920
},
{
"epoch": 2.7761194029850746,
"grad_norm": 0.5027190076200428,
"learning_rate": 5e-06,
"loss": 0.5101,
"step": 930
},
{
"epoch": 2.8059701492537314,
"grad_norm": 0.5325189560701381,
"learning_rate": 5e-06,
"loss": 0.5126,
"step": 940
},
{
"epoch": 2.835820895522388,
"grad_norm": 0.6188498248886457,
"learning_rate": 5e-06,
"loss": 0.5125,
"step": 950
},
{
"epoch": 2.8656716417910446,
"grad_norm": 0.5315894494034671,
"learning_rate": 5e-06,
"loss": 0.5101,
"step": 960
},
{
"epoch": 2.8955223880597014,
"grad_norm": 0.5715252097778681,
"learning_rate": 5e-06,
"loss": 0.5138,
"step": 970
},
{
"epoch": 2.925373134328358,
"grad_norm": 0.4780679784022612,
"learning_rate": 5e-06,
"loss": 0.5108,
"step": 980
},
{
"epoch": 2.955223880597015,
"grad_norm": 0.48072163191940603,
"learning_rate": 5e-06,
"loss": 0.5103,
"step": 990
},
{
"epoch": 2.9850746268656714,
"grad_norm": 0.5364386007246811,
"learning_rate": 5e-06,
"loss": 0.5156,
"step": 1000
},
{
"epoch": 3.0,
"eval_loss": 0.6066075563430786,
"eval_runtime": 181.24,
"eval_samples_per_second": 49.785,
"eval_steps_per_second": 0.392,
"step": 1005
},
{
"epoch": 3.0,
"step": 1005,
"total_flos": 1682993672355840.0,
"train_loss": 0.5693152133505143,
"train_runtime": 30325.6118,
"train_samples_per_second": 16.959,
"train_steps_per_second": 0.033
}
],
"logging_steps": 10,
"max_steps": 1005,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1682993672355840.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}