ben81828 committed (verified)
Commit 7d5ff44 · 1 Parent(s): 062a34b

Training in progress, step 850, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48b3ad121c980fed4c4f6e332f8d74b8c34f8719af7ea7fe02d0238882dfb6ca
+oid sha256:c194e4011ebc3d53bf21924236aa69f02cd3e886bdcc096049ceabfa8e037964
 size 29034840
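
Note: adapter_model.safetensors (like the other binary files in this commit) is stored as a Git LFS pointer, so the diff above only changes the sha256 and byte size recorded in the pointer. A minimal sketch for checking a locally downloaded copy against the new pointer; the local path is an assumption, not part of the commit:

```python
# Sketch: verify a downloaded LFS object against the sha256 from its pointer.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected digest taken from the new pointer shown above.
expected = "c194e4011ebc3d53bf21924236aa69f02cd3e886bdcc096049ceabfa8e037964"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")  # assumed local path
print("match" if actual == expected else f"mismatch: {actual}")
```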
last-checkpoint/global_step850/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:34100fa86cad3e9f6e41fe4c2d38f267a77fc9e2dd8b5fd799f7787b501bfe3e
+size 43429616
last-checkpoint/global_step850/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e1e44c14b7c791b760d7023aba18ee7a5b74a7d72087679039616a97ca38679
+size 43429616
last-checkpoint/global_step850/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cb42580270678586b4015104dd0b1978995640e143c570f30e0d2d55fddaf5d4
+size 43429616
last-checkpoint/global_step850/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:efea34847e37bfe5bb8c884f5166d8cb467598a44096ccdc7a2d925a98b3a749
+size 43429616
last-checkpoint/global_step850/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b18337b921f0aba380566f71c4857b67981734dc7975b4ed43e57711a412ef28
+size 637299
last-checkpoint/global_step850/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d144742df08d3730450d7fb36709b9a81b06278b4025a5f5b7cf8306696ee8ed
+size 637171
last-checkpoint/global_step850/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8a1e27c63d553ea81d8a5f4f57f82e6fd2dab396822dedba339d93c3e67b54fd
+size 637171
last-checkpoint/global_step850/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4777d80a9e8c2d0c23709b7630c4e2ffc82a5624bbdac8e3a2d838a612e80caa
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step800
+global_step850
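
The global_step850/ files added above are DeepSpeed ZeRO partitions (bf16 optimizer shards plus per-rank model states for ranks 0–3), and last-checkpoint/latest now names that tag. A hedged sketch of consolidating such a checkpoint into a single fp32 state dict with DeepSpeed's zero_to_fp32 helper, assuming DeepSpeed is installed and the full checkpoint directory has been downloaded locally:

```python
# Sketch only: merge the ZeRO-partitioned shards under last-checkpoint/
# into one fp32 state dict. Assumes the deepspeed package is installed and
# the whole checkpoint directory (including global_step850/) is present.
import torch
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# With no explicit tag, DeepSpeed reads the "latest" file, which now
# points at global_step850.
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint")
torch.save(state_dict, "consolidated_fp32.pt")  # output path is arbitrary
print(f"consolidated {len(state_dict)} tensors")
```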
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b4f003069486a57c6ac033f30cf4c4213eb6b7d659bab68a5a50fdb8da7c4118
+oid sha256:36c9044354f826de248840acaaec171f816609c147a664089731a0570deef948
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a016ef89b4392d083b2c15a7cf06a39bc61a759f648cf6dc03f1c32b89a526aa
+oid sha256:54f4c4013326db4e7267b656aaf72b86570f8aeee91ad39242a416cf8b963191
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9b56fe0893036dc052d18d90feba4328b90ea71561942150b07406ac3d7a700e
+oid sha256:43e910793831957d8685c316138c33eef8867edf60052477dc9ad6ec0c6da901
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0c203d12c2c308dab785ed672c9ca27fb6a2f72acd1e1552d1516c7b0006013
+oid sha256:7cfd0d71ef5a6c58d9f1d46851f4b1e699ca8a50ab3223cfb39668895cffeef2
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d85b710a5709549c0b4daddcc052f2ed242a5d916ac9ca030c805e7ff501c88
+oid sha256:3e24eaa8963cd872c048c2c655789d678b8c3fcd1c77ca0d663feee5857d2e34
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.5219093561172485,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_detect_scale4/lora/sft/checkpoint-800",
-  "epoch": 0.20602626834921453,
+  "epoch": 0.21890291012104043,
   "eval_steps": 50,
-  "global_step": 800,
+  "global_step": 850,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1431,11 +1431,100 @@
       "eval_steps_per_second": 0.755,
       "num_input_tokens_seen": 8393696,
       "step": 800
+    },
+    {
+      "epoch": 0.20731393252639713,
+      "grad_norm": 4.254130676908817,
+      "learning_rate": 9.076296203830579e-05,
+      "loss": 0.5449,
+      "num_input_tokens_seen": 8446496,
+      "step": 805
+    },
+    {
+      "epoch": 0.2086015967035797,
+      "grad_norm": 5.6525741285524145,
+      "learning_rate": 9.062166909283062e-05,
+      "loss": 0.5625,
+      "num_input_tokens_seen": 8499544,
+      "step": 810
+    },
+    {
+      "epoch": 0.2098892608807623,
+      "grad_norm": 3.8041246225911345,
+      "learning_rate": 9.047941543889014e-05,
+      "loss": 0.5564,
+      "num_input_tokens_seen": 8552568,
+      "step": 815
+    },
+    {
+      "epoch": 0.2111769250579449,
+      "grad_norm": 3.803732280546421,
+      "learning_rate": 9.033620444080428e-05,
+      "loss": 0.5487,
+      "num_input_tokens_seen": 8605560,
+      "step": 820
+    },
+    {
+      "epoch": 0.21246458923512748,
+      "grad_norm": 2.8518948364927925,
+      "learning_rate": 9.019203948553422e-05,
+      "loss": 0.5719,
+      "num_input_tokens_seen": 8657704,
+      "step": 825
+    },
+    {
+      "epoch": 0.21375225341231008,
+      "grad_norm": 3.939376115862177,
+      "learning_rate": 9.004692398260244e-05,
+      "loss": 0.5235,
+      "num_input_tokens_seen": 8711088,
+      "step": 830
+    },
+    {
+      "epoch": 0.21503991758949267,
+      "grad_norm": 6.635912128499916,
+      "learning_rate": 8.9900861364012e-05,
+      "loss": 0.5566,
+      "num_input_tokens_seen": 8763712,
+      "step": 835
+    },
+    {
+      "epoch": 0.21632758176667524,
+      "grad_norm": 3.7547407090496687,
+      "learning_rate": 8.975385508416532e-05,
+      "loss": 0.482,
+      "num_input_tokens_seen": 8815760,
+      "step": 840
+    },
+    {
+      "epoch": 0.21761524594385784,
+      "grad_norm": 4.093006904445721,
+      "learning_rate": 8.960590861978265e-05,
+      "loss": 0.5046,
+      "num_input_tokens_seen": 8867720,
+      "step": 845
+    },
+    {
+      "epoch": 0.21890291012104043,
+      "grad_norm": 11.397392997722068,
+      "learning_rate": 8.945702546981969e-05,
+      "loss": 0.5063,
+      "num_input_tokens_seen": 8919608,
+      "step": 850
+    },
+    {
+      "epoch": 0.21890291012104043,
+      "eval_loss": 0.5525640249252319,
+      "eval_runtime": 39.0469,
+      "eval_samples_per_second": 3.073,
+      "eval_steps_per_second": 0.768,
+      "num_input_tokens_seen": 8919608,
+      "step": 850
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 8393696,
+  "num_input_tokens_seen": 8919608,
   "num_train_epochs": 1,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1450,7 +1539,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 553685057798144.0,
+  "total_flos": 588414853316608.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null