sf_peft_test / checkpoint-100 / trainer_state.json
First attempt at a BitsNBytes QLoRA Falcon model. (commit 0eccbca)
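For context, below is a minimal sketch of the kind of bitsandbytes + PEFT QLoRA setup that could produce a trainer state like the one in this file. It is not the repo's actual training script: the base model name, LoRA hyperparameters, and target modules are assumptions, and only the learning rate, logging cadence, and save cadence are mirrored from the values recorded in trainer_state.json.

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, TrainingArguments
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

base_model = "tiiuae/falcon-7b"  # assumed Falcon variant, not confirmed by this repo

# 4-bit NF4 quantization via bitsandbytes (the "BitsNBytes" part of the commit message).
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

model = AutoModelForCausalLM.from_pretrained(
    base_model,
    quantization_config=bnb_config,
    device_map="auto",
    trust_remote_code=True,
)
model = prepare_model_for_kbit_training(model)

# LoRA adapter config; r, alpha, dropout, and target_modules are illustrative guesses.
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["query_key_value"],  # assumed Falcon attention projection
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)

training_args = TrainingArguments(
    output_dir="sf_peft_test",
    learning_rate=2e-4,   # matches the constant 0.0002 in log_history
    num_train_epochs=1,   # the recorded max_steps of 156 corresponds to one pass over the dataset
    logging_steps=10,     # log_history entries are 10 steps apart
    save_steps=100,       # this file lives in checkpoint-100
)
# A Trainer built from `model`, `training_args`, and a train_dataset would write
# checkpoint-*/trainer_state.json files like the one below.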
{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.64,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.06,
      "learning_rate": 0.0002,
      "loss": 2.8722,
      "step": 10
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0002,
      "loss": 2.7951,
      "step": 20
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0002,
      "loss": 2.7847,
      "step": 30
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.0002,
      "loss": 2.7507,
      "step": 40
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.0002,
      "loss": 2.7355,
      "step": 50
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0002,
      "loss": 2.7085,
      "step": 60
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.0002,
      "loss": 2.6827,
      "step": 70
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0002,
      "loss": 2.6429,
      "step": 80
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.0002,
      "loss": 2.6901,
      "step": 90
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0002,
      "loss": 2.688,
      "step": 100
    }
  ],
  "max_steps": 156,
  "num_train_epochs": 1,
  "total_flos": 6.770832332292096e+16,
  "trial_name": null,
  "trial_params": null
}
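To inspect this state programmatically, a short sketch (the path is an assumption based on this repo's layout):

import json

# Read the saved trainer state from this checkpoint.
with open("checkpoint-100/trainer_state.json") as f:
    state = json.load(f)

# Each log_history entry records epoch, learning_rate, loss, and step.
for entry in state["log_history"]:
    print(f"step {entry['step']:>3}  epoch {entry['epoch']:.2f}  loss {entry['loss']:.4f}")

# 100 of 156 planned optimizer steps (about 0.64 of one epoch) were complete at this checkpoint.
print(f"{state['global_step']}/{state['max_steps']} steps, epoch {state['epoch']}")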