{
  "best_metric": 1.3336918354034424,
  "best_model_checkpoint": "smilemikan/nllb-finetuned-jpn-to-ain-2/checkpoint-44000",
  "epoch": 9.914077990746861,
  "eval_steps": 500,
  "global_step": 45000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.11, "learning_rate": 1.9833333333333335e-05, "loss": 4.6196, "step": 500 },
    { "epoch": 0.11, "eval_loss": 3.7452237606048584, "eval_runtime": 43.586, "eval_samples_per_second": 208.278, "eval_steps_per_second": 13.032, "step": 500 },
    { "epoch": 0.22, "learning_rate": 1.9666666666666666e-05, "loss": 3.5915, "step": 1000 },
    { "epoch": 0.22, "eval_loss": 3.2064383029937744, "eval_runtime": 43.0726, "eval_samples_per_second": 210.76, "eval_steps_per_second": 13.187, "step": 1000 },
    { "epoch": 0.33, "learning_rate": 1.95e-05, "loss": 3.1484, "step": 1500 },
    { "epoch": 0.33, "eval_loss": 2.8919074535369873, "eval_runtime": 42.8255, "eval_samples_per_second": 211.977, "eval_steps_per_second": 13.263, "step": 1500 },
    { "epoch": 0.44, "learning_rate": 1.9333333333333333e-05, "loss": 2.902, "step": 2000 },
    { "epoch": 0.44, "eval_loss": 2.679006338119507, "eval_runtime": 43.6162, "eval_samples_per_second": 208.134, "eval_steps_per_second": 13.023, "step": 2000 },
    { "epoch": 0.55, "learning_rate": 1.916666666666667e-05, "loss": 2.7296, "step": 2500 },
    { "epoch": 0.55, "eval_loss": 2.5206823348999023, "eval_runtime": 42.8291, "eval_samples_per_second": 211.959, "eval_steps_per_second": 13.262, "step": 2500 },
    { "epoch": 0.66, "learning_rate": 1.9000333333333335e-05, "loss": 2.6105, "step": 3000 },
    { "epoch": 0.66, "eval_loss": 2.394014835357666, "eval_runtime": 42.8022, "eval_samples_per_second": 212.092, "eval_steps_per_second": 13.27, "step": 3000 },
    { "epoch": 0.77, "learning_rate": 1.883366666666667e-05, "loss": 2.5068, "step": 3500 },
    { "epoch": 0.77, "eval_loss": 2.30385422706604, "eval_runtime": 44.6981, "eval_samples_per_second": 203.096, "eval_steps_per_second": 12.707, "step": 3500 },
    { "epoch": 0.88, "learning_rate": 1.8667000000000003e-05, "loss": 2.3848, "step": 4000 },
    { "epoch": 0.88, "eval_loss": 2.2251899242401123, "eval_runtime": 42.8042, "eval_samples_per_second": 212.082, "eval_steps_per_second": 13.27, "step": 4000 },
    { "epoch": 0.99, "learning_rate": 1.8500333333333337e-05, "loss": 2.3391, "step": 4500 },
    { "epoch": 0.99, "eval_loss": 2.148716449737549, "eval_runtime": 43.0552, "eval_samples_per_second": 210.845, "eval_steps_per_second": 13.192, "step": 4500 },
    { "epoch": 1.1, "learning_rate": 1.8334e-05, "loss": 2.1994, "step": 5000 },
    { "epoch": 1.1, "eval_loss": 2.091869354248047, "eval_runtime": 43.5857, "eval_samples_per_second": 208.279, "eval_steps_per_second": 13.032, "step": 5000 },
    { "epoch": 1.21, "learning_rate": 1.8167333333333335e-05, "loss": 2.1446, "step": 5500 },
    { "epoch": 1.21, "eval_loss": 2.0449471473693848, "eval_runtime": 43.2545, "eval_samples_per_second": 209.874, "eval_steps_per_second": 13.132, "step": 5500 },
    { "epoch": 1.32, "learning_rate": 1.800066666666667e-05, "loss": 2.1012, "step": 6000 },
    { "epoch": 1.32, "eval_loss": 2.0015575885772705, "eval_runtime": 43.2116, "eval_samples_per_second": 210.083, "eval_steps_per_second": 13.145, "step": 6000 },
    { "epoch": 1.43, "learning_rate": 1.7834000000000002e-05, "loss": 2.0542, "step": 6500 },
    { "epoch": 1.43, "eval_loss": 1.951575517654419, "eval_runtime": 43.3919, "eval_samples_per_second": 209.209, "eval_steps_per_second": 13.09, "step": 6500 },
    { "epoch": 1.54, "learning_rate": 1.7667333333333333e-05, "loss": 2.0216, "step": 7000 },
    { "epoch": 1.54, "eval_loss": 1.9161829948425293, "eval_runtime": 43.6948, "eval_samples_per_second": 207.759, "eval_steps_per_second": 12.999, "step": 7000 },
    { "epoch": 1.65, "learning_rate": 1.7500666666666667e-05, "loss": 1.9926, "step": 7500 },
    { "epoch": 1.65, "eval_loss": 1.8790709972381592, "eval_runtime": 44.1328, "eval_samples_per_second": 205.698, "eval_steps_per_second": 12.87, "step": 7500 },
    { "epoch": 1.76, "learning_rate": 1.7334e-05, "loss": 1.9278, "step": 8000 },
    { "epoch": 1.76, "eval_loss": 1.845486044883728, "eval_runtime": 44.2649, "eval_samples_per_second": 205.084, "eval_steps_per_second": 12.832, "step": 8000 },
    { "epoch": 1.87, "learning_rate": 1.7167333333333334e-05, "loss": 1.9305, "step": 8500 },
    { "epoch": 1.87, "eval_loss": 1.8124595880508423, "eval_runtime": 43.561, "eval_samples_per_second": 208.398, "eval_steps_per_second": 13.039, "step": 8500 },
    { "epoch": 1.98, "learning_rate": 1.7000666666666668e-05, "loss": 1.905, "step": 9000 },
    { "epoch": 1.98, "eval_loss": 1.7888203859329224, "eval_runtime": 43.4924, "eval_samples_per_second": 208.726, "eval_steps_per_second": 13.06, "step": 9000 },
    { "epoch": 2.09, "learning_rate": 1.6834666666666667e-05, "loss": 1.828, "step": 9500 },
    { "epoch": 2.09, "eval_loss": 1.7763384580612183, "eval_runtime": 43.5326, "eval_samples_per_second": 208.534, "eval_steps_per_second": 13.048, "step": 9500 },
    { "epoch": 2.2, "learning_rate": 1.6668e-05, "loss": 1.808, "step": 10000 },
    { "epoch": 2.2, "eval_loss": 1.7431403398513794, "eval_runtime": 43.6327, "eval_samples_per_second": 208.055, "eval_steps_per_second": 13.018, "step": 10000 },
    { "epoch": 2.31, "learning_rate": 1.6501333333333334e-05, "loss": 1.7434, "step": 10500 },
    { "epoch": 2.31, "eval_loss": 1.7204526662826538, "eval_runtime": 44.0612, "eval_samples_per_second": 206.032, "eval_steps_per_second": 12.891, "step": 10500 },
    { "epoch": 2.42, "learning_rate": 1.6334666666666668e-05, "loss": 1.7527, "step": 11000 },
    { "epoch": 2.42, "eval_loss": 1.7068791389465332, "eval_runtime": 43.4892, "eval_samples_per_second": 208.741, "eval_steps_per_second": 13.061, "step": 11000 },
    { "epoch": 2.53, "learning_rate": 1.6168333333333336e-05, "loss": 1.6977, "step": 11500 },
    { "epoch": 2.53, "eval_loss": 1.6881217956542969, "eval_runtime": 43.1769, "eval_samples_per_second": 210.252, "eval_steps_per_second": 13.155, "step": 11500 },
    { "epoch": 2.64, "learning_rate": 1.600166666666667e-05, "loss": 1.6854, "step": 12000 },
    { "epoch": 2.64, "eval_loss": 1.6784749031066895, "eval_runtime": 42.7581, "eval_samples_per_second": 212.311, "eval_steps_per_second": 13.284, "step": 12000 },
    { "epoch": 2.75, "learning_rate": 1.5835e-05, "loss": 1.6603, "step": 12500 },
    { "epoch": 2.75, "eval_loss": 1.6570212841033936, "eval_runtime": 42.6669, "eval_samples_per_second": 212.765, "eval_steps_per_second": 13.312, "step": 12500 },
    { "epoch": 2.86, "learning_rate": 1.5668333333333334e-05, "loss": 1.6725, "step": 13000 },
    { "epoch": 2.86, "eval_loss": 1.6376469135284424, "eval_runtime": 43.1686, "eval_samples_per_second": 210.292, "eval_steps_per_second": 13.158, "step": 13000 },
    { "epoch": 2.97, "learning_rate": 1.5501666666666668e-05, "loss": 1.6384, "step": 13500 },
    { "epoch": 2.97, "eval_loss": 1.6240772008895874, "eval_runtime": 43.2327, "eval_samples_per_second": 209.98, "eval_steps_per_second": 13.138, "step": 13500 },
    { "epoch": 3.08, "learning_rate": 1.5335e-05, "loss": 1.5917, "step": 14000 },
    { "epoch": 3.08, "eval_loss": 1.6128900051116943, "eval_runtime": 42.9712, "eval_samples_per_second": 211.258, "eval_steps_per_second": 13.218, "step": 14000 },
    { "epoch": 3.19, "learning_rate": 1.5168333333333334e-05, "loss": 1.591, "step": 14500 },
    { "epoch": 3.19, "eval_loss": 1.6054280996322632, "eval_runtime": 43.0919, "eval_samples_per_second": 210.666, "eval_steps_per_second": 13.181, "step": 14500 },
    { "epoch": 3.3, "learning_rate": 1.5001666666666667e-05, "loss": 1.5936, "step": 15000 },
    { "epoch": 3.3, "eval_loss": 1.5842323303222656, "eval_runtime": 43.5874, "eval_samples_per_second": 208.271, "eval_steps_per_second": 13.031, "step": 15000 },
    { "epoch": 3.41, "learning_rate": 1.4835000000000001e-05, "loss": 1.6031, "step": 15500 },
    { "epoch": 3.41, "eval_loss": 1.5732085704803467, "eval_runtime": 42.6594, "eval_samples_per_second": 212.802, "eval_steps_per_second": 13.315, "step": 15500 },
    { "epoch": 3.53, "learning_rate": 1.4668666666666669e-05, "loss": 1.554, "step": 16000 },
    { "epoch": 3.53, "eval_loss": 1.5651994943618774, "eval_runtime": 42.8927, "eval_samples_per_second": 211.645, "eval_steps_per_second": 13.242, "step": 16000 },
    { "epoch": 3.64, "learning_rate": 1.4502000000000001e-05, "loss": 1.5284, "step": 16500 },
    { "epoch": 3.64, "eval_loss": 1.5577419996261597, "eval_runtime": 43.6635, "eval_samples_per_second": 207.908, "eval_steps_per_second": 13.009, "step": 16500 },
    { "epoch": 3.75, "learning_rate": 1.4335666666666667e-05, "loss": 1.5219, "step": 17000 },
    { "epoch": 3.75, "eval_loss": 1.544019103050232, "eval_runtime": 43.4005, "eval_samples_per_second": 209.168, "eval_steps_per_second": 13.087, "step": 17000 },
    { "epoch": 3.86, "learning_rate": 1.4169000000000001e-05, "loss": 1.5173, "step": 17500 },
    { "epoch": 3.86, "eval_loss": 1.5335613489151, "eval_runtime": 43.512, "eval_samples_per_second": 208.632, "eval_steps_per_second": 13.054, "step": 17500 },
    { "epoch": 3.97, "learning_rate": 1.4002333333333335e-05, "loss": 1.4781, "step": 18000 },
    { "epoch": 3.97, "eval_loss": 1.5262504816055298, "eval_runtime": 43.3365, "eval_samples_per_second": 209.477, "eval_steps_per_second": 13.107, "step": 18000 },
    { "epoch": 4.08, "learning_rate": 1.3835666666666667e-05, "loss": 1.4771, "step": 18500 },
    { "epoch": 4.08, "eval_loss": 1.519882082939148, "eval_runtime": 43.8272, "eval_samples_per_second": 207.132, "eval_steps_per_second": 12.96, "step": 18500 },
    { "epoch": 4.19, "learning_rate": 1.3669e-05, "loss": 1.4613, "step": 19000 },
    { "epoch": 4.19, "eval_loss": 1.5142260789871216, "eval_runtime": 43.3978, "eval_samples_per_second": 209.181, "eval_steps_per_second": 13.088, "step": 19000 },
    { "epoch": 4.3, "learning_rate": 1.3502333333333335e-05, "loss": 1.452, "step": 19500 },
    { "epoch": 4.3, "eval_loss": 1.503504753112793, "eval_runtime": 43.2384, "eval_samples_per_second": 209.952, "eval_steps_per_second": 13.136, "step": 19500 },
    { "epoch": 4.41, "learning_rate": 1.3335666666666667e-05, "loss": 1.4563, "step": 20000 },
    { "epoch": 4.41, "eval_loss": 1.495379090309143, "eval_runtime": 43.6248, "eval_samples_per_second": 208.093, "eval_steps_per_second": 13.02, "step": 20000 },
    { "epoch": 4.52, "learning_rate": 1.3169e-05, "loss": 1.46, "step": 20500 },
    { "epoch": 4.52, "eval_loss": 1.4834085702896118, "eval_runtime": 43.9437, "eval_samples_per_second": 206.583, "eval_steps_per_second": 12.926, "step": 20500 },
    { "epoch": 4.63, "learning_rate": 1.3002333333333334e-05, "loss": 1.4284, "step": 21000 },
    { "epoch": 4.63, "eval_loss": 1.4811300039291382, "eval_runtime": 43.2207, "eval_samples_per_second": 210.038, "eval_steps_per_second": 13.142, "step": 21000 },
    { "epoch": 4.74, "learning_rate": 1.2836000000000002e-05, "loss": 1.4527, "step": 21500 },
    { "epoch": 4.74, "eval_loss": 1.4702121019363403, "eval_runtime": 43.7967, "eval_samples_per_second": 207.276, "eval_steps_per_second": 12.969, "step": 21500 },
    { "epoch": 4.85, "learning_rate": 1.2669333333333334e-05, "loss": 1.4375, "step": 22000 },
    { "epoch": 4.85, "eval_loss": 1.4648058414459229, "eval_runtime": 43.2759, "eval_samples_per_second": 209.77, "eval_steps_per_second": 13.125, "step": 22000 },
    { "epoch": 4.96, "learning_rate": 1.2502666666666668e-05, "loss": 1.4093, "step": 22500 },
    { "epoch": 4.96, "eval_loss": 1.452415108680725, "eval_runtime": 43.4383, "eval_samples_per_second": 208.986, "eval_steps_per_second": 13.076, "step": 22500 },
    { "epoch": 5.07, "learning_rate": 1.2336000000000002e-05, "loss": 1.3688, "step": 23000 },
    { "epoch": 5.07, "eval_loss": 1.4525853395462036, "eval_runtime": 44.7597, "eval_samples_per_second": 202.816, "eval_steps_per_second": 12.69, "step": 23000 },
    { "epoch": 5.18, "learning_rate": 1.2169333333333336e-05, "loss": 1.3704, "step": 23500 },
    { "epoch": 5.18, "eval_loss": 1.4470324516296387, "eval_runtime": 45.0353, "eval_samples_per_second": 201.575, "eval_steps_per_second": 12.612, "step": 23500 },
    { "epoch": 5.29, "learning_rate": 1.2003e-05, "loss": 1.3672, "step": 24000 },
    { "epoch": 5.29, "eval_loss": 1.4429727792739868, "eval_runtime": 48.0671, "eval_samples_per_second": 188.861, "eval_steps_per_second": 11.817, "step": 24000 },
    { "epoch": 5.4, "learning_rate": 1.1836333333333334e-05, "loss": 1.3484, "step": 24500 },
    { "epoch": 5.4, "eval_loss": 1.4372690916061401, "eval_runtime": 45.4091, "eval_samples_per_second": 199.916, "eval_steps_per_second": 12.508, "step": 24500 },
    { "epoch": 5.51, "learning_rate": 1.1669666666666668e-05, "loss": 1.3446, "step": 25000 },
    { "epoch": 5.51, "eval_loss": 1.4315961599349976, "eval_runtime": 44.7878, "eval_samples_per_second": 202.689, "eval_steps_per_second": 12.682, "step": 25000 },
    { "epoch": 5.62, "learning_rate": 1.1503000000000002e-05, "loss": 1.3445, "step": 25500 },
    { "epoch": 5.62, "eval_loss": 1.4315000772476196, "eval_runtime": 45.134, "eval_samples_per_second": 201.134, "eval_steps_per_second": 12.585, "step": 25500 },
    { "epoch": 5.73, "learning_rate": 1.1336333333333334e-05, "loss": 1.3236, "step": 26000 },
    { "epoch": 5.73, "eval_loss": 1.424834132194519, "eval_runtime": 44.2677, "eval_samples_per_second": 205.07, "eval_steps_per_second": 12.831, "step": 26000 },
    { "epoch": 5.84, "learning_rate": 1.1169666666666667e-05, "loss": 1.3061, "step": 26500 },
    { "epoch": 5.84, "eval_loss": 1.4215062856674194, "eval_runtime": 44.2542, "eval_samples_per_second": 205.133, "eval_steps_per_second": 12.835, "step": 26500 },
    { "epoch": 5.95, "learning_rate": 1.1003000000000001e-05, "loss": 1.3083, "step": 27000 },
    { "epoch": 5.95, "eval_loss": 1.4195761680603027, "eval_runtime": 44.381, "eval_samples_per_second": 204.547, "eval_steps_per_second": 12.798, "step": 27000 },
    { "epoch": 6.06, "learning_rate": 1.0836333333333333e-05, "loss": 1.2689, "step": 27500 },
    { "epoch": 6.06, "eval_loss": 1.4157874584197998, "eval_runtime": 44.2987, "eval_samples_per_second": 204.927, "eval_steps_per_second": 12.822, "step": 27500 },
    { "epoch": 6.17, "learning_rate": 1.0669666666666667e-05, "loss": 1.2813, "step": 28000 },
    { "epoch": 6.17, "eval_loss": 1.4157196283340454, "eval_runtime": 44.6759, "eval_samples_per_second": 203.197, "eval_steps_per_second": 12.714, "step": 28000 },
    { "epoch": 6.28, "learning_rate": 1.0503333333333335e-05, "loss": 1.2863, "step": 28500 },
    { "epoch": 6.28, "eval_loss": 1.41006338596344, "eval_runtime": 44.666, "eval_samples_per_second": 203.242, "eval_steps_per_second": 12.717, "step": 28500 },
    { "epoch": 6.39, "learning_rate": 1.0336666666666669e-05, "loss": 1.2668, "step": 29000 },
    { "epoch": 6.39, "eval_loss": 1.403244972229004, "eval_runtime": 44.6624, "eval_samples_per_second": 203.258, "eval_steps_per_second": 12.718, "step": 29000 },
    { "epoch": 6.5, "learning_rate": 1.017e-05, "loss": 1.2631, "step": 29500 },
    { "epoch": 6.5, "eval_loss": 1.4050610065460205, "eval_runtime": 44.987, "eval_samples_per_second": 201.792, "eval_steps_per_second": 12.626, "step": 29500 },
    { "epoch": 6.61, "learning_rate": 1.0003333333333333e-05, "loss": 1.2543, "step": 30000 },
    { "epoch": 6.61, "eval_loss": 1.3968815803527832, "eval_runtime": 45.3856, "eval_samples_per_second": 200.019, "eval_steps_per_second": 12.515, "step": 30000 },
    { "epoch": 6.72, "learning_rate": 9.836666666666668e-06, "loss": 1.2552, "step": 30500 },
    { "epoch": 6.72, "eval_loss": 1.3993921279907227, "eval_runtime": 44.6433, "eval_samples_per_second": 203.345, "eval_steps_per_second": 12.723, "step": 30500 },
    { "epoch": 6.83, "learning_rate": 9.67e-06, "loss": 1.2626, "step": 31000 },
    { "epoch": 6.83, "eval_loss": 1.387181043624878, "eval_runtime": 44.4948, "eval_samples_per_second": 204.024, "eval_steps_per_second": 12.766, "step": 31000 },
    { "epoch": 6.94, "learning_rate": 9.503333333333334e-06, "loss": 1.2396, "step": 31500 },
    { "epoch": 6.94, "eval_loss": 1.3914097547531128, "eval_runtime": 44.936, "eval_samples_per_second": 202.021, "eval_steps_per_second": 12.64, "step": 31500 },
    { "epoch": 7.05, "learning_rate": 9.336666666666666e-06, "loss": 1.2419, "step": 32000 },
    { "epoch": 7.05, "eval_loss": 1.3853869438171387, "eval_runtime": 44.4747, "eval_samples_per_second": 204.116, "eval_steps_per_second": 12.771, "step": 32000 },
    { "epoch": 7.16, "learning_rate": 9.17e-06, "loss": 1.2421, "step": 32500 },
    { "epoch": 7.16, "eval_loss": 1.3801844120025635, "eval_runtime": 44.8621, "eval_samples_per_second": 202.353, "eval_steps_per_second": 12.661, "step": 32500 },
    { "epoch": 7.27, "learning_rate": 9.003333333333334e-06, "loss": 1.2177, "step": 33000 },
    { "epoch": 7.27, "eval_loss": 1.380096673965454, "eval_runtime": 45.1593, "eval_samples_per_second": 201.022, "eval_steps_per_second": 12.578, "step": 33000 },
    { "epoch": 7.38, "learning_rate": 8.836666666666668e-06, "loss": 1.2292, "step": 33500 },
    { "epoch": 7.38, "eval_loss": 1.3730745315551758, "eval_runtime": 44.6177, "eval_samples_per_second": 203.462, "eval_steps_per_second": 12.73, "step": 33500 },
    { "epoch": 7.49, "learning_rate": 8.67e-06, "loss": 1.2131, "step": 34000 },
    { "epoch": 7.49, "eval_loss": 1.370631217956543, "eval_runtime": 45.0076, "eval_samples_per_second": 201.699, "eval_steps_per_second": 12.62, "step": 34000 },
    { "epoch": 7.6, "learning_rate": 8.503666666666668e-06, "loss": 1.227, "step": 34500 },
    { "epoch": 7.6, "eval_loss": 1.3661019802093506, "eval_runtime": 44.9507, "eval_samples_per_second": 201.955, "eval_steps_per_second": 12.636, "step": 34500 },
    { "epoch": 7.71, "learning_rate": 8.337e-06, "loss": 1.2158, "step": 35000 },
    { "epoch": 7.71, "eval_loss": 1.3633307218551636, "eval_runtime": 44.5235, "eval_samples_per_second": 203.892, "eval_steps_per_second": 12.757, "step": 35000 },
    { "epoch": 7.82, "learning_rate": 8.170333333333334e-06, "loss": 1.2194, "step": 35500 },
    { "epoch": 7.82, "eval_loss": 1.3616106510162354, "eval_runtime": 45.4373, "eval_samples_per_second": 199.792, "eval_steps_per_second": 12.501, "step": 35500 },
    { "epoch": 7.93, "learning_rate": 8.004e-06, "loss": 1.2261, "step": 36000 },
    { "epoch": 7.93, "eval_loss": 1.357163429260254, "eval_runtime": 45.4325, "eval_samples_per_second": 199.813, "eval_steps_per_second": 12.502, "step": 36000 },
    { "epoch": 8.04, "learning_rate": 7.837666666666666e-06, "loss": 1.218, "step": 36500 },
    { "epoch": 8.04, "eval_loss": 1.3611406087875366, "eval_runtime": 44.5114, "eval_samples_per_second": 203.948, "eval_steps_per_second": 12.761, "step": 36500 },
    { "epoch": 8.15, "learning_rate": 7.671e-06, "loss": 1.1658, "step": 37000 },
    { "epoch": 8.15, "eval_loss": 1.356821060180664, "eval_runtime": 44.6544, "eval_samples_per_second": 203.295, "eval_steps_per_second": 12.72, "step": 37000 },
    { "epoch": 8.26, "learning_rate": 7.504333333333334e-06, "loss": 1.1652, "step": 37500 },
    { "epoch": 8.26, "eval_loss": 1.35381281375885, "eval_runtime": 45.0638, "eval_samples_per_second": 201.448, "eval_steps_per_second": 12.604, "step": 37500 },
    { "epoch": 8.37, "learning_rate": 7.3376666666666675e-06, "loss": 1.1686, "step": 38000 },
    { "epoch": 8.37, "eval_loss": 1.3533384799957275, "eval_runtime": 44.2795, "eval_samples_per_second": 205.016, "eval_steps_per_second": 12.828, "step": 38000 },
    { "epoch": 8.48, "learning_rate": 7.1710000000000005e-06, "loss": 1.1666, "step": 38500 },
    { "epoch": 8.48, "eval_loss": 1.3513332605361938, "eval_runtime": 44.2333, "eval_samples_per_second": 205.23, "eval_steps_per_second": 12.841, "step": 38500 },
    { "epoch": 8.59, "learning_rate": 7.004333333333334e-06, "loss": 1.1827, "step": 39000 },
    { "epoch": 8.59, "eval_loss": 1.3496123552322388, "eval_runtime": 44.8817, "eval_samples_per_second": 202.265, "eval_steps_per_second": 12.655, "step": 39000 },
    { "epoch": 8.7, "learning_rate": 6.837666666666667e-06, "loss": 1.1643, "step": 39500 },
    { "epoch": 8.7, "eval_loss": 1.3474962711334229, "eval_runtime": 44.7028, "eval_samples_per_second": 203.074, "eval_steps_per_second": 12.706, "step": 39500 },
    { "epoch": 8.81, "learning_rate": 6.671000000000001e-06, "loss": 1.1651, "step": 40000 },
    { "epoch": 8.81, "eval_loss": 1.3451054096221924, "eval_runtime": 44.3384, "eval_samples_per_second": 204.743, "eval_steps_per_second": 12.811, "step": 40000 },
    { "epoch": 8.92, "learning_rate": 6.504333333333334e-06, "loss": 1.1696, "step": 40500 },
    { "epoch": 8.92, "eval_loss": 1.3412760496139526, "eval_runtime": 44.0851, "eval_samples_per_second": 205.92, "eval_steps_per_second": 12.884, "step": 40500 },
    { "epoch": 9.03, "learning_rate": 6.337666666666668e-06, "loss": 1.1582, "step": 41000 },
    { "epoch": 9.03, "eval_loss": 1.3448957204818726, "eval_runtime": 43.2139, "eval_samples_per_second": 210.071, "eval_steps_per_second": 13.144, "step": 41000 },
    { "epoch": 9.14, "learning_rate": 6.171000000000001e-06, "loss": 1.152, "step": 41500 },
    { "epoch": 9.14, "eval_loss": 1.3436229228973389, "eval_runtime": 43.5301, "eval_samples_per_second": 208.545, "eval_steps_per_second": 13.048, "step": 41500 },
    { "epoch": 9.25, "learning_rate": 6.004666666666668e-06, "loss": 1.1177, "step": 42000 },
    { "epoch": 9.25, "eval_loss": 1.3422951698303223, "eval_runtime": 43.2428, "eval_samples_per_second": 209.931, "eval_steps_per_second": 13.135, "step": 42000 },
    { "epoch": 9.36, "learning_rate": 5.838000000000001e-06, "loss": 1.1195, "step": 42500 },
    { "epoch": 9.36, "eval_loss": 1.340394139289856, "eval_runtime": 43.8891, "eval_samples_per_second": 206.839, "eval_steps_per_second": 12.942, "step": 42500 },
    { "epoch": 9.47, "learning_rate": 5.6713333333333345e-06, "loss": 1.1389, "step": 43000 },
    { "epoch": 9.47, "eval_loss": 1.3390538692474365, "eval_runtime": 43.1572, "eval_samples_per_second": 210.347, "eval_steps_per_second": 13.161, "step": 43000 },
    { "epoch": 9.58, "learning_rate": 5.5046666666666674e-06, "loss": 1.1316, "step": 43500 },
    { "epoch": 9.58, "eval_loss": 1.3349236249923706, "eval_runtime": 43.9132, "eval_samples_per_second": 206.726, "eval_steps_per_second": 12.935, "step": 43500 },
    { "epoch": 9.69, "learning_rate": 5.3383333333333345e-06, "loss": 1.1376, "step": 44000 },
    { "epoch": 9.69, "eval_loss": 1.3336918354034424, "eval_runtime": 43.0007, "eval_samples_per_second": 211.113, "eval_steps_per_second": 13.209, "step": 44000 },
    { "epoch": 9.8, "learning_rate": 5.171666666666667e-06, "loss": 1.1407, "step": 44500 },
    { "epoch": 9.8, "eval_loss": 1.33429753780365, "eval_runtime": 43.2578, "eval_samples_per_second": 209.858, "eval_steps_per_second": 13.131, "step": 44500 },
    { "epoch": 9.91, "learning_rate": 5.0053333333333344e-06, "loss": 1.1149, "step": 45000 },
    { "epoch": 9.91, "eval_bleu": 29.77431864288693, "eval_loss": 1.3337864875793457, "eval_runtime": 1068.0893, "eval_samples_per_second": 8.499, "eval_steps_per_second": 0.532, "step": 45000 }
  ],
  "logging_steps": 500,
  "max_steps": 60000,
  "num_train_epochs": 14,
  "save_steps": 500,
  "total_flos": 4.560668249043763e+16,
  "trial_name": null,
  "trial_params": null
}