Commit 56ca234 (verified) by winnieyangwannan · 1 Parent(s): a52c806

Training in progress, step 30, checkpoint

checkpoint-30/adapter_config.json CHANGED
@@ -23,11 +23,11 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
     "v_proj",
-    "o_proj",
-    "gate_proj",
+    "k_proj",
     "q_proj",
+    "gate_proj",
+    "o_proj",
     "down_proj",
     "up_proj"
   ],
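The target_modules change only reorders the list; the same seven projection modules (q/k/v/o, gate, up, down) are adapted before and after, which is why the adapter_model.safetensors size below is unchanged. As a minimal, hypothetical sketch of how this module list maps onto a peft LoraConfig (rank, alpha and dropout are placeholders, since they are not part of this hunk):

# Hypothetical reconstruction; only target_modules is taken from the diff above.
from peft import LoraConfig

lora_config = LoraConfig(
    target_modules=["v_proj", "k_proj", "q_proj", "gate_proj",
                    "o_proj", "down_proj", "up_proj"],
    r=16,               # placeholder: rank is not shown in this hunk
    lora_alpha=32,      # placeholder
    lora_dropout=0.05,  # placeholder
    task_type="CAUSAL_LM",
)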
checkpoint-30/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:555ae9dbdd5399f736ccf0e7d4364fc478ce7228e0831c5a78b55cfa91611f17
+oid sha256:7ab952efab01e33f03d0f19d1ee5cfc15024e4555aee826756ebcbe6c278b898
 size 145287696
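Only the Git LFS pointer changes here: the byte size stays at 145287696 while the sha256 object id is new, i.e. the adapter weights were rewritten in place at step 30. For reference, a small sketch that parses a pointer file of exactly this three-line shape (version / oid / size):

# Parses a Git LFS pointer like the one shown above.
def parse_lfs_pointer(text: str) -> dict:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "spec": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:7ab952efab01e33f03d0f19d1ee5cfc15024e4555aee826756ebcbe6c278b898\n"
    "size 145287696\n"
)
print(parse_lfs_pointer(pointer))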
checkpoint-30/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:eb50010124ba399674d41831ed4fc601f93b9757ec615e236f0526b40d43b2d0
+oid sha256:29228ccd3a6abf030b6d73bf0611ee584514c664d35fe090c4459870489a1df3
 size 290833618
checkpoint-30/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:efe70a1ac954116cafd503663506e71c6dc98e58b8f6de74290b12eb58debe44
+oid sha256:2a0978cabf0cce48bd72f8b2a49841b57fc9d030790f9386a5448e5e10126d59
 size 1064
checkpoint-30/trainer_state.json CHANGED
@@ -10,54 +10,54 @@
   "log_history": [
     {
       "epoch": 0.021739130434782608,
-      "grad_norm": 2.138256549835205,
-      "learning_rate": 4.5e-05,
-      "loss": 2.3657,
+      "grad_norm": 2.2049312591552734,
+      "learning_rate": 4.963768115942029e-05,
+      "loss": 2.3459,
       "step": 10
     },
     {
       "epoch": 0.021739130434782608,
-      "eval_loss": 1.5849242210388184,
-      "eval_runtime": 9.6002,
-      "eval_samples_per_second": 49.999,
-      "eval_steps_per_second": 1.562,
+      "eval_loss": 1.5330805778503418,
+      "eval_runtime": 9.6608,
+      "eval_samples_per_second": 49.685,
+      "eval_steps_per_second": 1.553,
       "step": 10
     },
     {
       "epoch": 0.043478260869565216,
-      "grad_norm": 1.1203280687332153,
-      "learning_rate": 4e-05,
-      "loss": 1.2125,
+      "grad_norm": 1.1927831172943115,
+      "learning_rate": 4.9275362318840584e-05,
+      "loss": 1.1669,
       "step": 20
     },
     {
       "epoch": 0.043478260869565216,
-      "eval_loss": 1.04584538936615,
-      "eval_runtime": 9.677,
-      "eval_samples_per_second": 49.602,
-      "eval_steps_per_second": 1.55,
+      "eval_loss": 1.0271039009094238,
+      "eval_runtime": 9.7215,
+      "eval_samples_per_second": 49.375,
+      "eval_steps_per_second": 1.543,
       "step": 20
     },
     {
       "epoch": 0.06521739130434782,
-      "grad_norm": 1.064527988433838,
-      "learning_rate": 3.5e-05,
-      "loss": 0.867,
+      "grad_norm": 1.0509072542190552,
+      "learning_rate": 4.891304347826087e-05,
+      "loss": 0.8327,
       "step": 30
     },
     {
       "epoch": 0.06521739130434782,
-      "eval_loss": 0.9389117360115051,
-      "eval_runtime": 9.7352,
-      "eval_samples_per_second": 49.305,
-      "eval_steps_per_second": 1.541,
+      "eval_loss": 0.9039635062217712,
+      "eval_runtime": 9.7818,
+      "eval_samples_per_second": 49.071,
+      "eval_steps_per_second": 1.533,
       "step": 30
     }
   ],
   "logging_steps": 10,
-  "max_steps": 100,
+  "max_steps": 1380,
   "num_input_tokens_seen": 0,
-  "num_train_epochs": 1,
+  "num_train_epochs": 3,
   "save_steps": 10,
   "stateful_callbacks": {
     "TrainerControl": {
checkpoint-30/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:17a1a7c75c85fbba54f1e97b2cee73c167834890dd0df62950989daaf8975814
+oid sha256:1cdf2cbd96a7d8012e1f3da0569783099ba26a3ecbec680ff36cde09793d0889
 size 5816
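The committed checkpoint directory can be loaded directly with peft; a hypothetical loading sketch, where the base model name is a placeholder because the base model is not identified anywhere in this commit:

# Hypothetical usage; "BASE_MODEL_NAME" must be replaced with the actual base model.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("BASE_MODEL_NAME")  # placeholder
model = PeftModel.from_pretrained(base, "checkpoint-30")        # reads adapter_config.json and adapter_model.safetensors

Resuming the run itself would instead go through the Trainer, e.g. trainer.train(resume_from_checkpoint="checkpoint-30"), which also restores optimizer.pt, scheduler.pt and trainer_state.json.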