Femboyuwu2000 committed
Commit 678205d
1 Parent(s): 70ae419

Training in progress, step 220, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e1143902ff3564e4c1002e08abff0e3901f7b2864ff1561402b11204e5362411
+oid sha256:3078213dcd15bc291dc37ca3077f9efb155cb015e2ecfc039f1079c016cd9a50
 size 4725640
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:db52ebf37465555ffb2a88505ae648a96569156a76709768fa9d1a22d929c602
+oid sha256:684dc57c1e94460d88f7247e65733810667abcc8211761ae7f175a94fb2a5c38
 size 2423738
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8c1160b60ff5f8d1ce07bd99cf7c1f641950e24f450dc82b69252f6b166db4c3
+oid sha256:70a160794c42b620b052ded8acee8ed084ab34b149aee53004744c1a7084b810
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:86f012eba30607df0ae053cd324d767650b13de6ae604fa48d8a90e45bcf2f90
+oid sha256:a258940d20544cb994c096c67b2810560f36bc1c5c956be9ea4d5468f6460551
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.00015565050236199636,
+  "epoch": 0.00017121555259819602,
   "eval_steps": 500,
-  "global_step": 200,
+  "global_step": 220,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -77,6 +77,13 @@
       "learning_rate": 3.316666666666667e-05,
       "loss": 3.0499,
       "step": 200
-    }
+    },
+    {
+      "epoch": 0.0,
+      "grad_norm": 6.314597129821777,
+      "learning_rate": 3.65e-05,
+      "loss": 2.8568,
+      "step": 220
+    }
   ],
   "logging_steps": 20,
@@ -84,7 +91,7 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 20,
-  "total_flos": 985164675969024.0,
+  "total_flos": 1070150797246464.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null