{
"best_metric": 0.23586615753847973,
"best_model_checkpoint": "/content/drive/MyDrive/model_outputs/trial_5/checkpoint-292",
"epoch": 3.0,
"eval_steps": 500,
"global_step": 438,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0684931506849315,
"grad_norm": 5.608231067657471,
"learning_rate": 3.6003360497121445e-06,
"loss": 2.2214,
"step": 10
},
{
"epoch": 0.136986301369863,
"grad_norm": 5.646218776702881,
"learning_rate": 7.200672099424289e-06,
"loss": 2.1624,
"step": 20
},
{
"epoch": 0.2054794520547945,
"grad_norm": 5.6624908447265625,
"learning_rate": 1.041544648904501e-05,
"loss": 2.1074,
"step": 30
},
{
"epoch": 0.273972602739726,
"grad_norm": 5.1634697914123535,
"learning_rate": 1.0160165937842927e-05,
"loss": 2.0965,
"step": 40
},
{
"epoch": 0.3424657534246575,
"grad_norm": 4.445455551147461,
"learning_rate": 9.904885386640843e-06,
"loss": 2.0527,
"step": 50
},
{
"epoch": 0.410958904109589,
"grad_norm": 4.640649795532227,
"learning_rate": 9.64960483543876e-06,
"loss": 2.0678,
"step": 60
},
{
"epoch": 0.4794520547945205,
"grad_norm": 5.322652816772461,
"learning_rate": 9.394324284236676e-06,
"loss": 2.0892,
"step": 70
},
{
"epoch": 0.547945205479452,
"grad_norm": 5.406436443328857,
"learning_rate": 9.139043733034592e-06,
"loss": 2.0179,
"step": 80
},
{
"epoch": 0.6164383561643836,
"grad_norm": 4.761965751647949,
"learning_rate": 8.883763181832509e-06,
"loss": 2.0074,
"step": 90
},
{
"epoch": 0.684931506849315,
"grad_norm": 4.901184558868408,
"learning_rate": 8.628482630630425e-06,
"loss": 2.0241,
"step": 100
},
{
"epoch": 0.7534246575342466,
"grad_norm": 5.2895941734313965,
"learning_rate": 8.373202079428341e-06,
"loss": 2.0019,
"step": 110
},
{
"epoch": 0.821917808219178,
"grad_norm": 4.653793811798096,
"learning_rate": 8.117921528226258e-06,
"loss": 2.0277,
"step": 120
},
{
"epoch": 0.8904109589041096,
"grad_norm": 5.342679977416992,
"learning_rate": 7.862640977024174e-06,
"loss": 1.9835,
"step": 130
},
{
"epoch": 0.958904109589041,
"grad_norm": 5.529128074645996,
"learning_rate": 7.607360425822091e-06,
"loss": 2.0019,
"step": 140
},
{
"epoch": 1.0,
"eval_classification_report": {
"accuracy": 0.2285,
"ar": {
"f1-score": 0.0547945205479452,
"precision": 0.46153846153846156,
"recall": 0.02912621359223301,
"support": 206.0
},
"cl": {
"f1-score": 0.2222222222222222,
"precision": 0.18932038834951456,
"recall": 0.2689655172413793,
"support": 290.0
},
"co": {
"f1-score": 0.2832980972515856,
"precision": 0.20458015267175572,
"recall": 0.46048109965635736,
"support": 291.0
},
"es": {
"f1-score": 0.2547993019197208,
"precision": 0.24829931972789115,
"recall": 0.2616487455197133,
"support": 279.0
},
"macro avg": {
"f1-score": 0.1991778548194697,
"precision": 0.27210428357115557,
"recall": 0.2048681078220834,
"support": 2000.0
},
"mx": {
"f1-score": 0.23859649122807017,
"precision": 0.24372759856630824,
"recall": 0.23367697594501718,
"support": 291.0
},
"pe": {
"f1-score": 0.174573055028463,
"precision": 0.19491525423728814,
"recall": 0.15807560137457044,
"support": 291.0
},
"pr": {
"f1-score": 0.4567901234567901,
"precision": 0.6065573770491803,
"recall": 0.36633663366336633,
"support": 101.0
},
"uy": {
"f1-score": 0.10752688172043011,
"precision": 0.3,
"recall": 0.06550218340611354,
"support": 229.0
},
"ve": {
"f1-score": 0.0,
"precision": 0.0,
"recall": 0.0,
"support": 22.0
},
"weighted avg": {
"f1-score": 0.21012633177845003,
"precision": 0.2681977677888293,
"recall": 0.2285,
"support": 2000.0
}
},
"eval_f1": 0.1991778548194697,
"eval_loss": 1.9731428623199463,
"eval_runtime": 3.5688,
"eval_samples_per_second": 560.405,
"eval_steps_per_second": 17.653,
"step": 146
},
{
"epoch": 1.0273972602739727,
"grad_norm": 4.819364547729492,
"learning_rate": 7.352079874620008e-06,
"loss": 1.9655,
"step": 150
},
{
"epoch": 1.095890410958904,
"grad_norm": 5.056692123413086,
"learning_rate": 7.096799323417924e-06,
"loss": 1.9654,
"step": 160
},
{
"epoch": 1.1643835616438356,
"grad_norm": 5.491762638092041,
"learning_rate": 6.8415187722158405e-06,
"loss": 1.9755,
"step": 170
},
{
"epoch": 1.2328767123287672,
"grad_norm": 5.931197643280029,
"learning_rate": 6.586238221013757e-06,
"loss": 1.9487,
"step": 180
},
{
"epoch": 1.3013698630136985,
"grad_norm": 6.085172176361084,
"learning_rate": 6.330957669811673e-06,
"loss": 1.9436,
"step": 190
},
{
"epoch": 1.36986301369863,
"grad_norm": 6.041102409362793,
"learning_rate": 6.07567711860959e-06,
"loss": 1.9561,
"step": 200
},
{
"epoch": 1.4383561643835616,
"grad_norm": 5.231081008911133,
"learning_rate": 5.820396567407506e-06,
"loss": 1.959,
"step": 210
},
{
"epoch": 1.5068493150684932,
"grad_norm": 5.686812400817871,
"learning_rate": 5.565116016205422e-06,
"loss": 1.8635,
"step": 220
},
{
"epoch": 1.5753424657534247,
"grad_norm": 5.119288921356201,
"learning_rate": 5.309835465003339e-06,
"loss": 1.9364,
"step": 230
},
{
"epoch": 1.643835616438356,
"grad_norm": 6.177616596221924,
"learning_rate": 5.054554913801255e-06,
"loss": 1.8987,
"step": 240
},
{
"epoch": 1.7123287671232876,
"grad_norm": 5.89414644241333,
"learning_rate": 4.7992743625991715e-06,
"loss": 1.9352,
"step": 250
},
{
"epoch": 1.7808219178082192,
"grad_norm": 6.277669429779053,
"learning_rate": 4.543993811397088e-06,
"loss": 1.8815,
"step": 260
},
{
"epoch": 1.8493150684931505,
"grad_norm": 5.846259593963623,
"learning_rate": 4.288713260195004e-06,
"loss": 1.9424,
"step": 270
},
{
"epoch": 1.9178082191780823,
"grad_norm": 5.867184162139893,
"learning_rate": 4.033432708992921e-06,
"loss": 1.9088,
"step": 280
},
{
"epoch": 1.9863013698630136,
"grad_norm": 8.803180694580078,
"learning_rate": 3.778152157790837e-06,
"loss": 1.8713,
"step": 290
},
{
"epoch": 2.0,
"eval_classification_report": {
"accuracy": 0.261,
"ar": {
"f1-score": 0.061946902654867256,
"precision": 0.35,
"recall": 0.03398058252427184,
"support": 206.0
},
"cl": {
"f1-score": 0.2,
"precision": 0.1935483870967742,
"recall": 0.20689655172413793,
"support": 290.0
},
"co": {
"f1-score": 0.30229007633587784,
"precision": 0.27197802197802196,
"recall": 0.3402061855670103,
"support": 291.0
},
"es": {
"f1-score": 0.32801161103047893,
"precision": 0.275609756097561,
"recall": 0.4050179211469534,
"support": 279.0
},
"macro avg": {
"f1-score": 0.23586615753847973,
"precision": 0.28112434178245116,
"recall": 0.2369581042839982,
"support": 2000.0
},
"mx": {
"f1-score": 0.272108843537415,
"precision": 0.26936026936026936,
"recall": 0.27491408934707906,
"support": 291.0
},
"pe": {
"f1-score": 0.2346368715083799,
"precision": 0.1976470588235294,
"recall": 0.28865979381443296,
"support": 291.0
},
"pr": {
"f1-score": 0.5088757396449705,
"precision": 0.6323529411764706,
"recall": 0.42574257425742573,
"support": 101.0
},
"uy": {
"f1-score": 0.21492537313432836,
"precision": 0.33962264150943394,
"recall": 0.1572052401746725,
"support": 229.0
},
"ve": {
"f1-score": 0.0,
"precision": 0.0,
"recall": 0.0,
"support": 22.0
},
"weighted avg": {
"f1-score": 0.24916003843418816,
"precision": 0.2809050615354289,
"recall": 0.261,
"support": 2000.0
}
},
"eval_f1": 0.23586615753847973,
"eval_loss": 1.9224119186401367,
"eval_runtime": 3.5253,
"eval_samples_per_second": 567.324,
"eval_steps_per_second": 17.871,
"step": 292
},
{
"epoch": 2.0547945205479454,
"grad_norm": 6.486109733581543,
"learning_rate": 3.5228716065887534e-06,
"loss": 1.841,
"step": 300
},
{
"epoch": 2.1232876712328768,
"grad_norm": 6.070629596710205,
"learning_rate": 3.26759105538667e-06,
"loss": 1.8551,
"step": 310
},
{
"epoch": 2.191780821917808,
"grad_norm": 6.238626003265381,
"learning_rate": 3.012310504184586e-06,
"loss": 1.8897,
"step": 320
},
{
"epoch": 2.26027397260274,
"grad_norm": 6.827130317687988,
"learning_rate": 2.7570299529825026e-06,
"loss": 1.814,
"step": 330
},
{
"epoch": 2.328767123287671,
"grad_norm": 6.772587776184082,
"learning_rate": 2.501749401780419e-06,
"loss": 1.8214,
"step": 340
},
{
"epoch": 2.3972602739726026,
"grad_norm": 6.412906646728516,
"learning_rate": 2.2464688505783353e-06,
"loss": 1.8611,
"step": 350
},
{
"epoch": 2.4657534246575343,
"grad_norm": 6.432934284210205,
"learning_rate": 1.9911882993762517e-06,
"loss": 1.8398,
"step": 360
},
{
"epoch": 2.5342465753424657,
"grad_norm": 6.384337425231934,
"learning_rate": 1.7359077481741683e-06,
"loss": 1.8487,
"step": 370
},
{
"epoch": 2.602739726027397,
"grad_norm": 6.324769020080566,
"learning_rate": 1.4806271969720847e-06,
"loss": 1.8422,
"step": 380
},
{
"epoch": 2.671232876712329,
"grad_norm": 6.698541641235352,
"learning_rate": 1.2253466457700013e-06,
"loss": 1.8652,
"step": 390
},
{
"epoch": 2.73972602739726,
"grad_norm": 6.660873889923096,
"learning_rate": 9.700660945679177e-07,
"loss": 1.8213,
"step": 400
},
{
"epoch": 2.808219178082192,
"grad_norm": 6.92650032043457,
"learning_rate": 7.147855433658339e-07,
"loss": 1.847,
"step": 410
},
{
"epoch": 2.8767123287671232,
"grad_norm": 7.111581802368164,
"learning_rate": 4.595049921637505e-07,
"loss": 1.8672,
"step": 420
},
{
"epoch": 2.9452054794520546,
"grad_norm": 7.183070182800293,
"learning_rate": 2.0422444096166686e-07,
"loss": 1.8367,
"step": 430
}
],
"logging_steps": 10,
"max_steps": 438,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 920815007980032.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}