{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.791519434628975,
"eval_steps": 500,
"global_step": 100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.1413427561837456,
"grad_norm": 8.807223320007324,
"learning_rate": 4.9821173158545936e-05,
"loss": 4.4182,
"num_input_tokens_seen": 89360,
"step": 5
},
{
"epoch": 0.2826855123674912,
"grad_norm": 9.933388710021973,
"learning_rate": 4.928725095732169e-05,
"loss": 2.2953,
"num_input_tokens_seen": 173680,
"step": 10
},
{
"epoch": 0.42402826855123676,
"grad_norm": 11.242781639099121,
"learning_rate": 4.813260751184992e-05,
"loss": 2.6132,
"num_input_tokens_seen": 256320,
"step": 15
},
{
"epoch": 0.5653710247349824,
"grad_norm": 18.152969360351562,
"learning_rate": 4.6461219840046654e-05,
"loss": 2.6473,
"num_input_tokens_seen": 354080,
"step": 20
},
{
"epoch": 0.7067137809187279,
"grad_norm": 13.362907409667969,
"learning_rate": 4.431042398061499e-05,
"loss": 1.9798,
"num_input_tokens_seen": 445120,
"step": 25
},
{
"epoch": 0.8480565371024735,
"grad_norm": 11.124238014221191,
"learning_rate": 4.172826515897146e-05,
"loss": 1.6494,
"num_input_tokens_seen": 536160,
"step": 30
},
{
"epoch": 0.9893992932862191,
"grad_norm": 13.07321548461914,
"learning_rate": 3.8772424536302564e-05,
"loss": 1.2652,
"num_input_tokens_seen": 622160,
"step": 35
},
{
"epoch": 1.1130742049469964,
"grad_norm": 10.049528121948242,
"learning_rate": 3.550893070773914e-05,
"loss": 1.1679,
"num_input_tokens_seen": 702992,
"step": 40
},
{
"epoch": 1.254416961130742,
"grad_norm": 3.1295673847198486,
"learning_rate": 3.201068473265007e-05,
"loss": 0.7785,
"num_input_tokens_seen": 795712,
"step": 45
},
{
"epoch": 1.3957597173144876,
"grad_norm": 13.35985279083252,
"learning_rate": 2.8355831645441388e-05,
"loss": 1.3648,
"num_input_tokens_seen": 880032,
"step": 50
},
{
"epoch": 1.5371024734982333,
"grad_norm": 11.210596084594727,
"learning_rate": 2.4626014824618415e-05,
"loss": 1.1121,
"num_input_tokens_seen": 969392,
"step": 55
},
{
"epoch": 1.6784452296819787,
"grad_norm": 8.436594009399414,
"learning_rate": 2.090455221462156e-05,
"loss": 1.1847,
"num_input_tokens_seen": 1058752,
"step": 60
},
{
"epoch": 1.8197879858657244,
"grad_norm": 7.8270649909973145,
"learning_rate": 1.7274575140626318e-05,
"loss": 0.4018,
"num_input_tokens_seen": 1143072,
"step": 65
},
{
"epoch": 1.96113074204947,
"grad_norm": 18.734634399414062,
"learning_rate": 1.3817171292109183e-05,
"loss": 0.7509,
"num_input_tokens_seen": 1234112,
"step": 70
},
{
"epoch": 2.0848056537102475,
"grad_norm": 22.8198299407959,
"learning_rate": 1.0609573357858166e-05,
"loss": 0.46,
"num_input_tokens_seen": 1303184,
"step": 75
},
{
"epoch": 2.2261484098939928,
"grad_norm": 6.278092861175537,
"learning_rate": 7.723433775328384e-06,
"loss": 0.4216,
"num_input_tokens_seen": 1385824,
"step": 80
},
{
"epoch": 2.3674911660777385,
"grad_norm": 24.357179641723633,
"learning_rate": 5.223224133591476e-06,
"loss": 0.5816,
"num_input_tokens_seen": 1476864,
"step": 85
},
{
"epoch": 2.508833922261484,
"grad_norm": 8.690754890441895,
"learning_rate": 3.164794984571759e-06,
"loss": 0.4955,
"num_input_tokens_seen": 1569584,
"step": 90
},
{
"epoch": 2.65017667844523,
"grad_norm": 3.9934189319610596,
"learning_rate": 1.59412823400657e-06,
"loss": 0.2691,
"num_input_tokens_seen": 1657264,
"step": 95
},
{
"epoch": 2.791519434628975,
"grad_norm": 9.84725284576416,
"learning_rate": 5.463099816548579e-07,
"loss": 1.1078,
"num_input_tokens_seen": 1746624,
"step": 100
}
],
"logging_steps": 5,
"max_steps": 105,
"num_input_tokens_seen": 1746624,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.3006890837540864e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}
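Note: the JSON above is a standard Hugging Face Trainer state file; the sketch below is one way to inspect its logged metrics. It assumes the file is saved locally as "trainer_state.json" (the filename the Trainer normally uses inside a checkpoint directory); the path and the printed fields are illustrative, not part of the original file.

# Minimal sketch, assuming the JSON above is stored as "trainer_state.json".
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# log_history holds one entry per logging event (every logging_steps = 5 optimizer steps here).
for entry in state["log_history"]:
    print(f'step {entry["step"]:>3}  '
          f'epoch {entry["epoch"]:.2f}  '
          f'loss {entry["loss"]:.4f}  '
          f'lr {entry["learning_rate"]:.2e}')

Running this prints the training-loss curve in tabular form, which makes the downward trend from 4.42 at step 5 to roughly 0.3-1.1 near step 100 easy to see at a glance.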