chauhoang committed
Commit a4d1751 · verified · 1 Parent(s): 574c897

Training in progress, step 1, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -20,13 +20,13 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
-    "gate_proj",
     "down_proj",
     "o_proj",
     "up_proj",
-    "v_proj",
-    "q_proj"
+    "gate_proj",
+    "q_proj",
+    "k_proj",
+    "v_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2daa5bd55bedf198d1d64977bc8928cfc4ab1338646ed22b35c439ed81cc47ca
+oid sha256:b071be95eb2e1fa59e3d854e8765b01cdaa7bfe88136978cec3e9867389328f7
 size 22573704
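
This file (like the other checkpoint binaries below) is stored via Git LFS, so the diff shows only the pointer: a spec version, the SHA-256 of the blob, and its byte size. A downloaded blob can be checked against the pointer roughly as in this sketch (the path is illustrative):

import hashlib

def lfs_sha256(path, chunk_size=1 << 20):
    # Stream the file so large checkpoints don't need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected oid taken from the new pointer above.
expected = "b071be95eb2e1fa59e3d854e8765b01cdaa7bfe88136978cec3e9867389328f7"
assert lfs_sha256("last-checkpoint/adapter_model.safetensors") == expected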
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6011b229cf7932128024efaacc2097c7654c6fd396ad6ac1ebed40b7d7000e61
+oid sha256:41697e6396c5d85ff41eb38449bbeb93bce2966915a50942504fcf7aff33f712
 size 11710970
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d82b625db56507f447e53f457cd40a516407fb4c508f6a80ad29dded2571bb42
+oid sha256:e96084db0d88b3f6f054fd8e1ee389d8068f68911086b40d9534c6441e1738ec
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b1df0528620c07325b8faa7567e59b0c1e86a1f1ee6af1245a69c6c0463fe4e2
+oid sha256:ae751897b8e87ff08962a91d1d3485984775a96aa89e29a1caac3d6f449228f7
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.02632964718272775,
-  "eval_steps": 10,
-  "global_step": 50,
+  "epoch": 0.000526592943654555,
+  "eval_steps": 1,
+  "global_step": 1,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -11,127 +11,17 @@
     {
       "epoch": 0.000526592943654555,
       "eval_loss": 10.178997039794922,
-      "eval_runtime": 21.5369,
-      "eval_samples_per_second": 37.146,
-      "eval_steps_per_second": 18.573,
+      "eval_runtime": 20.8513,
+      "eval_samples_per_second": 38.367,
+      "eval_steps_per_second": 19.183,
       "step": 1
-    },
-    {
-      "epoch": 0.0026329647182727752,
-      "grad_norm": 21.805500030517578,
-      "learning_rate": 5e-05,
-      "loss": 10.0321,
-      "step": 5
-    },
-    {
-      "epoch": 0.0052659294365455505,
-      "grad_norm": 24.200536727905273,
-      "learning_rate": 0.0001,
-      "loss": 7.6786,
-      "step": 10
-    },
-    {
-      "epoch": 0.0052659294365455505,
-      "eval_loss": 3.4688384532928467,
-      "eval_runtime": 21.6215,
-      "eval_samples_per_second": 37.0,
-      "eval_steps_per_second": 18.5,
-      "step": 10
-    },
-    {
-      "epoch": 0.007898894154818325,
-      "grad_norm": 1.099424123764038,
-      "learning_rate": 9.619397662556435e-05,
-      "loss": 1.3599,
-      "step": 15
-    },
-    {
-      "epoch": 0.010531858873091101,
-      "grad_norm": 0.0010866125812754035,
-      "learning_rate": 8.535533905932738e-05,
-      "loss": 0.0009,
-      "step": 20
-    },
-    {
-      "epoch": 0.010531858873091101,
-      "eval_loss": 4.6386987378355116e-05,
-      "eval_runtime": 21.566,
-      "eval_samples_per_second": 37.095,
-      "eval_steps_per_second": 18.548,
-      "step": 20
-    },
-    {
-      "epoch": 0.013164823591363875,
-      "grad_norm": 0.0039704530499875546,
-      "learning_rate": 6.91341716182545e-05,
-      "loss": 0.0,
-      "step": 25
-    },
-    {
-      "epoch": 0.01579778830963665,
-      "grad_norm": 0.00037722557317465544,
-      "learning_rate": 5e-05,
-      "loss": 0.0,
-      "step": 30
-    },
-    {
-      "epoch": 0.01579778830963665,
-      "eval_loss": 3.998309239250375e-06,
-      "eval_runtime": 21.6363,
-      "eval_samples_per_second": 36.975,
-      "eval_steps_per_second": 18.487,
-      "step": 30
-    },
-    {
-      "epoch": 0.018430753027909426,
-      "grad_norm": 0.0013271215138956904,
-      "learning_rate": 3.086582838174551e-05,
-      "loss": 0.0,
-      "step": 35
-    },
-    {
-      "epoch": 0.021063717746182202,
-      "grad_norm": 0.0011808726703748107,
-      "learning_rate": 1.4644660940672627e-05,
-      "loss": 0.0,
-      "step": 40
-    },
-    {
-      "epoch": 0.021063717746182202,
-      "eval_loss": 3.2110322081280174e-06,
-      "eval_runtime": 21.2044,
-      "eval_samples_per_second": 37.728,
-      "eval_steps_per_second": 18.864,
-      "step": 40
-    },
-    {
-      "epoch": 0.023696682464454975,
-      "grad_norm": 0.0005120717105455697,
-      "learning_rate": 3.8060233744356633e-06,
-      "loss": 0.0,
-      "step": 45
-    },
-    {
-      "epoch": 0.02632964718272775,
-      "grad_norm": 0.0004391983966343105,
-      "learning_rate": 0.0,
-      "loss": 0.0,
-      "step": 50
-    },
-    {
-      "epoch": 0.02632964718272775,
-      "eval_loss": 3.196502120772493e-06,
-      "eval_runtime": 21.1976,
-      "eval_samples_per_second": 37.74,
-      "eval_steps_per_second": 18.87,
-      "step": 50
     }
   ],
   "logging_steps": 5,
-  "max_steps": 50,
+  "max_steps": 1,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
-  "save_steps": 13,
+  "save_steps": 1,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {
@@ -144,7 +34,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1202727577190400.0,
+  "total_flos": 24054551543808.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6dbfaf21602a949fb6188f79d43407ad28956e1d0fef6098b9e75a042893d463
+oid sha256:0ed38aa868afb29637e8105f6176eccb4c2984beb09e4927af87d52267ef29a7
 size 6776
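
Taken together, these files form a resumable Trainer checkpoint: adapter weights, optimizer and scheduler state, RNG state, trainer state, and the pickled training arguments. Loading or resuming from it would typically look something like the sketch below, where the base model name is a placeholder (the base model is not named in this commit) and the Trainer is assumed to already be configured.

from peft import PeftModel
from transformers import AutoModelForCausalLM

# "base-model-name" is a placeholder; the base model is not named here.
base = AutoModelForCausalLM.from_pretrained("base-model-name")
model = PeftModel.from_pretrained(base, "last-checkpoint")

# Or, with a configured transformers Trainer, continue training from the
# saved optimizer/scheduler/RNG state:
# trainer.train(resume_from_checkpoint="last-checkpoint")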