{
  "best_metric": 0.5891016200294551,
  "best_model_checkpoint": "videomae-base-finetuned-lift-data-resize/checkpoint-140",
  "epoch": 7.102564102564102,
  "eval_steps": 500,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0641025641025641,
      "grad_norm": 5.635838508605957,
      "learning_rate": 3.125e-05,
      "loss": 1.6404,
      "step": 10
    },
    {
      "epoch": 0.1282051282051282,
      "grad_norm": 5.812517166137695,
      "learning_rate": 4.8571428571428576e-05,
      "loss": 1.5758,
      "step": 20
    },
    {
      "epoch": 0.1282051282051282,
      "eval_accuracy": 0.29013254786450665,
      "eval_loss": 1.6618727445602417,
      "eval_runtime": 2083.0077,
      "eval_samples_per_second": 0.326,
      "eval_steps_per_second": 0.041,
      "step": 20
    },
    {
      "epoch": 1.064102564102564,
      "grad_norm": 4.693382740020752,
      "learning_rate": 4.5e-05,
      "loss": 1.3588,
      "step": 30
    },
    {
      "epoch": 1.1282051282051282,
      "grad_norm": 7.194770336151123,
      "learning_rate": 4.1428571428571437e-05,
      "loss": 1.3067,
      "step": 40
    },
    {
      "epoch": 1.1282051282051282,
      "eval_accuracy": 0.29896907216494845,
      "eval_loss": 1.6047614812850952,
      "eval_runtime": 2110.0361,
      "eval_samples_per_second": 0.322,
      "eval_steps_per_second": 0.04,
      "step": 40
    },
    {
      "epoch": 2.064102564102564,
      "grad_norm": 9.944411277770996,
      "learning_rate": 3.785714285714286e-05,
      "loss": 1.1955,
      "step": 50
    },
    {
      "epoch": 2.128205128205128,
      "grad_norm": 8.486799240112305,
      "learning_rate": 3.428571428571429e-05,
      "loss": 1.3787,
      "step": 60
    },
    {
      "epoch": 2.128205128205128,
      "eval_accuracy": 0.3181148748159057,
      "eval_loss": 1.4723353385925293,
      "eval_runtime": 2076.9142,
      "eval_samples_per_second": 0.327,
      "eval_steps_per_second": 0.041,
      "step": 60
    },
    {
      "epoch": 3.064102564102564,
      "grad_norm": 10.37818717956543,
      "learning_rate": 3.071428571428572e-05,
      "loss": 1.2217,
      "step": 70
    },
    {
      "epoch": 3.128205128205128,
      "grad_norm": 11.789801597595215,
      "learning_rate": 2.714285714285714e-05,
      "loss": 1.1642,
      "step": 80
    },
    {
      "epoch": 3.128205128205128,
      "eval_accuracy": 0.3004418262150221,
      "eval_loss": 1.4191110134124756,
      "eval_runtime": 2171.4394,
      "eval_samples_per_second": 0.313,
      "eval_steps_per_second": 0.039,
      "step": 80
    },
    {
      "epoch": 4.064102564102564,
      "grad_norm": 4.555699348449707,
      "learning_rate": 2.357142857142857e-05,
      "loss": 1.0205,
      "step": 90
    },
    {
      "epoch": 4.128205128205128,
      "grad_norm": 7.893691539764404,
      "learning_rate": 2e-05,
      "loss": 1.1172,
      "step": 100
    },
    {
      "epoch": 4.128205128205128,
      "eval_accuracy": 0.31958762886597936,
      "eval_loss": 1.2374299764633179,
      "eval_runtime": 2014.151,
      "eval_samples_per_second": 0.337,
      "eval_steps_per_second": 0.042,
      "step": 100
    },
    {
      "epoch": 5.064102564102564,
      "grad_norm": 4.984745979309082,
      "learning_rate": 1.642857142857143e-05,
      "loss": 1.0003,
      "step": 110
    },
    {
      "epoch": 5.128205128205128,
      "grad_norm": 9.872081756591797,
      "learning_rate": 1.2857142857142857e-05,
      "loss": 0.8982,
      "step": 120
    },
    {
      "epoch": 5.128205128205128,
      "eval_accuracy": 0.5655375552282769,
      "eval_loss": 1.0099185705184937,
      "eval_runtime": 2071.3572,
      "eval_samples_per_second": 0.328,
      "eval_steps_per_second": 0.041,
      "step": 120
    },
    {
      "epoch": 6.064102564102564,
      "grad_norm": 8.504035949707031,
      "learning_rate": 9.285714285714286e-06,
      "loss": 0.8094,
      "step": 130
    },
    {
      "epoch": 6.128205128205128,
      "grad_norm": 15.16745376586914,
      "learning_rate": 5.7142857142857145e-06,
      "loss": 0.915,
      "step": 140
    },
    {
      "epoch": 6.128205128205128,
      "eval_accuracy": 0.5891016200294551,
      "eval_loss": 0.9540217518806458,
      "eval_runtime": 2043.7965,
      "eval_samples_per_second": 0.332,
      "eval_steps_per_second": 0.042,
      "step": 140
    },
    {
      "epoch": 7.064102564102564,
      "grad_norm": 7.420783519744873,
      "learning_rate": 2.142857142857143e-06,
      "loss": 0.7809,
      "step": 150
    },
    {
      "epoch": 7.102564102564102,
      "eval_accuracy": 0.5714285714285714,
      "eval_loss": 0.9188718795776367,
      "eval_runtime": 2011.7001,
      "eval_samples_per_second": 0.338,
      "eval_steps_per_second": 0.042,
      "step": 156
    },
    {
      "epoch": 7.102564102564102,
      "step": 156,
      "total_flos": 1.528962156685394e+18,
      "train_loss": 1.1473380480057154,
      "train_runtime": 20559.8951,
      "train_samples_per_second": 0.061,
      "train_steps_per_second": 0.008
    },
    {
      "epoch": 7.102564102564102,
      "eval_accuracy": 0.7680890538033395,
      "eval_loss": 0.8145598769187927,
      "eval_runtime": 1580.5762,
      "eval_samples_per_second": 0.341,
      "eval_steps_per_second": 0.043,
      "step": 156
    },
    {
      "epoch": 7.102564102564102,
      "eval_accuracy": 0.7680890538033395,
      "eval_loss": 0.8150683641433716,
      "eval_runtime": 1611.9221,
      "eval_samples_per_second": 0.334,
      "eval_steps_per_second": 0.042,
      "step": 156
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.528962156685394e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}