Training in progress, step 2100, checkpoint
- last-checkpoint/adapter_model.safetensors +1 -1
- last-checkpoint/global_step2100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
- last-checkpoint/global_step2100/zero_pp_rank_0_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2100/zero_pp_rank_1_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2100/zero_pp_rank_2_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/global_step2100/zero_pp_rank_3_mp_rank_00_model_states.pt +3 -0
- last-checkpoint/latest +1 -1
- last-checkpoint/rng_state_0.pth +1 -1
- last-checkpoint/rng_state_1.pth +1 -1
- last-checkpoint/rng_state_2.pth +1 -1
- last-checkpoint/rng_state_3.pth +1 -1
- last-checkpoint/scheduler.pt +1 -1
- last-checkpoint/trainer_state.json +93 -4
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:7f75c51d9ca969a9d3cbb26206d8c3751152111b430ee8f78c774b9d99426948
 size 29034840
last-checkpoint/global_step2100/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f7aa16dee243b34185cf056e35e50a3678c95dbbbc5f9fd1110ebf3b61fbcbd
+size 43429616
last-checkpoint/global_step2100/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:126ebc8a1d461eb6971e0fe5db0deed7713d984269085c10b7479e2d2a342936
+size 43429616
last-checkpoint/global_step2100/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e47df0dcaf62d79ebb54425a75af03024182dc6d216cfc938c6349ced6b15d46
+size 43429616
last-checkpoint/global_step2100/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4d234070e3b5451c249b57b9d521faa0b72267c278a1ff149317bed3f637f667
+size 43429616
last-checkpoint/global_step2100/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:eeda14a8789d1fd88f7eebe00675f2572359184439f7c4c60b672fbdb030c435
+size 637299
last-checkpoint/global_step2100/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:aec295f73f879f7574e82192c1ccc1329fd81288043991b95b358440fd8f90f7
+size 637171
last-checkpoint/global_step2100/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:abae9bad6bb987038dcd73f0e19d994e45cae40e20cd055d436706a0b5fe6064
+size 637171
last-checkpoint/global_step2100/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86bfaf44e1fb94ad73e3fc28120b85b71fc1b7b472586a7f8b29d56ad2e71f3c
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-
+global_step2100
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:5d1ddd7e7b4dc44903837b0414e4659f8383cd8f16b41dd396d4eaf5b9829f79
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:eeddfb77fe4d3b495c4e08307767e08df90e96ef241c3eb80d5f75adec393e80
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:9e6260196fabb00061b1f1c8de6288382570dc14d02d2aa308050ca858880a97
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ea023bc5b1def54e0c49389175c0fae812f5f764c502525ce775d993d5ab2c03
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:41ca6ff868655333e43bb2ded3dfe8c66b7ed82fa8a98bace2e730c42bb9959b
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.4194311797618866,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-1800",
-  "epoch": 0.
+  "epoch": 0.5408189544166881,
   "eval_steps": 50,
-  "global_step":
+  "global_step": 2100,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -3656,11 +3656,100 @@
       "eval_steps_per_second": 0.786,
       "num_input_tokens_seen": 21527496,
       "step": 2050
+    },
+    {
+      "epoch": 0.5292299768220448,
+      "grad_norm": 2.354802898537749,
+      "learning_rate": 3.7019906676411446e-05,
+      "loss": 0.3937,
+      "num_input_tokens_seen": 21579816,
+      "step": 2055
+    },
+    {
+      "epoch": 0.5305176409992274,
+      "grad_norm": 6.103564629763687,
+      "learning_rate": 3.678524023788735e-05,
+      "loss": 0.4039,
+      "num_input_tokens_seen": 21631776,
+      "step": 2060
+    },
+    {
+      "epoch": 0.53180530517641,
+      "grad_norm": 3.5285927755997655,
+      "learning_rate": 3.6550886330380665e-05,
+      "loss": 0.3501,
+      "num_input_tokens_seen": 21683608,
+      "step": 2065
+    },
+    {
+      "epoch": 0.5330929693535926,
+      "grad_norm": 4.520457201010945,
+      "learning_rate": 3.631685049639586e-05,
+      "loss": 0.3334,
+      "num_input_tokens_seen": 21735672,
+      "step": 2070
+    },
+    {
+      "epoch": 0.5343806335307751,
+      "grad_norm": 2.4877611413408554,
+      "learning_rate": 3.608313827091493e-05,
+      "loss": 0.3292,
+      "num_input_tokens_seen": 21787592,
+      "step": 2075
+    },
+    {
+      "epoch": 0.5356682977079578,
+      "grad_norm": 1.8075536605690385,
+      "learning_rate": 3.5849755181266474e-05,
+      "loss": 0.3616,
+      "num_input_tokens_seen": 21840448,
+      "step": 2080
+    },
+    {
+      "epoch": 0.5369559618851404,
+      "grad_norm": 10.058001239765861,
+      "learning_rate": 3.5616706746995026e-05,
+      "loss": 0.3082,
+      "num_input_tokens_seen": 21893096,
+      "step": 2085
+    },
+    {
+      "epoch": 0.5382436260623229,
+      "grad_norm": 5.955966804633529,
+      "learning_rate": 3.538399847973036e-05,
+      "loss": 0.293,
+      "num_input_tokens_seen": 21945184,
+      "step": 2090
+    },
+    {
+      "epoch": 0.5395312902395055,
+      "grad_norm": 5.3172870269416554,
+      "learning_rate": 3.515163588305735e-05,
+      "loss": 0.3835,
+      "num_input_tokens_seen": 21998016,
+      "step": 2095
+    },
+    {
+      "epoch": 0.5408189544166881,
+      "grad_norm": 10.736456867600818,
+      "learning_rate": 3.491962445238569e-05,
+      "loss": 0.3983,
+      "num_input_tokens_seen": 22050376,
+      "step": 2100
+    },
+    {
+      "epoch": 0.5408189544166881,
+      "eval_loss": 0.481829971075058,
+      "eval_runtime": 37.9835,
+      "eval_samples_per_second": 3.159,
+      "eval_steps_per_second": 0.79,
+      "num_input_tokens_seen": 22050376,
+      "step": 2100
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen":
+  "num_input_tokens_seen": 22050376,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -3675,7 +3764,7 @@
       "attributes": {}
     }
   },
-  "total_flos":
+  "total_flos": 1454769949900800.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null