ayjays132 committed on
Commit
9c2c62a
·
verified ·
1 Parent(s): bc876cd

Upload 6 files

Browse files
Files changed (2) hide show
  1. config.json +13 -13
  2. trainer_state.json +48 -48
config.json CHANGED
@@ -5,7 +5,7 @@
5
  "GPT2LMHeadModel"
6
  ],
7
  "attn_pdrop": 0.1,
8
- "bos_token_id": 100256,
9
  "config": {
10
  "activation_function": "gelu_new",
11
  "attn_pdrop": 0.1,
@@ -13,18 +13,18 @@
13
  "gradient_checkpointing": true,
14
  "initializer_range": 0.02,
15
  "layer_norm_epsilon": 1e-05,
16
- "n_ctx": 1080,
17
- "n_embd": 1080,
18
- "n_head": 22,
19
  "n_layer": 36,
20
- "n_positions": 1080,
21
  "resid_pdrop": 0.1,
22
  "scale_attn_weights": true,
23
  "use_cache": true,
24
- "vocab_size": 100257
25
  },
26
  "embd_pdrop": 0.1,
27
- "eos_token_id": 100256,
28
  "initializer_range": 0.02,
29
  "language": "en",
30
  "layer_norm_epsilon": 1e-05,
@@ -35,10 +35,10 @@
35
  "accuracy"
36
  ],
37
  "model_type": "gpt2",
38
- "n_embd": 1000,
39
- "n_head": 15,
40
  "n_inner": null,
41
- "n_layer": 15,
42
  "n_positions": 1024,
43
  "pipeline_tag": "conversational",
44
  "reorder_and_upcast_attn": false,
@@ -71,14 +71,14 @@
71
  }
72
  },
73
  "tokenizer_config": {
74
- "bos_token_id": 100256,
75
  "eos_token_id": 50256,
76
- "n_positions": 1080,
77
  "padding_side": "left",
78
  "truncation_side": "right"
79
  },
80
  "torch_dtype": "float32",
81
  "transformers_version": "4.37.2",
82
  "use_cache": true,
83
- "vocab_size": 100257
84
  }
 
5
  "GPT2LMHeadModel"
6
  ],
7
  "attn_pdrop": 0.1,
8
+ "bos_token_id": 50256,
9
  "config": {
10
  "activation_function": "gelu_new",
11
  "attn_pdrop": 0.1,
 
13
  "gradient_checkpointing": true,
14
  "initializer_range": 0.02,
15
  "layer_norm_epsilon": 1e-05,
16
+ "n_ctx": 2048,
17
+ "n_embd": 2048,
18
+ "n_head": 16,
19
  "n_layer": 36,
20
+ "n_positions": 2048,
21
  "resid_pdrop": 0.1,
22
  "scale_attn_weights": true,
23
  "use_cache": true,
24
+ "vocab_size": 50257
25
  },
26
  "embd_pdrop": 0.1,
27
+ "eos_token_id": 50256,
28
  "initializer_range": 0.02,
29
  "language": "en",
30
  "layer_norm_epsilon": 1e-05,
 
35
  "accuracy"
36
  ],
37
  "model_type": "gpt2",
38
+ "n_embd": 768,
39
+ "n_head": 12,
40
  "n_inner": null,
41
+ "n_layer": 12,
42
  "n_positions": 1024,
43
  "pipeline_tag": "conversational",
44
  "reorder_and_upcast_attn": false,
 
71
  }
72
  },
73
  "tokenizer_config": {
74
+ "bos_token_id": 50256,
75
  "eos_token_id": 50256,
76
+ "n_positions": 2048,
77
  "padding_side": "left",
78
  "truncation_side": "right"
79
  },
80
  "torch_dtype": "float32",
81
  "transformers_version": "4.37.2",
82
  "use_cache": true,
83
+ "vocab_size": 50257
84
  }
trainer_state.json CHANGED
@@ -1,7 +1,7 @@
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
- "epoch": 2.1491510853212983,
5
  "eval_steps": 500,
6
  "global_step": 10000,
7
  "is_hyper_param_search": false,
@@ -10,131 +10,131 @@
10
  "log_history": [
11
  {
12
  "epoch": 0.11,
13
- "learning_rate": 4.820904076223225e-05,
14
- "loss": 1.1625,
15
  "step": 500
16
  },
17
  {
18
  "epoch": 0.21,
19
- "learning_rate": 4.641808152446451e-05,
20
- "loss": 1.1918,
21
  "step": 1000
22
  },
23
  {
24
  "epoch": 0.32,
25
- "learning_rate": 4.462712228669675e-05,
26
- "loss": 1.2037,
27
  "step": 1500
28
  },
29
  {
30
  "epoch": 0.43,
31
- "learning_rate": 4.283616304892901e-05,
32
- "loss": 1.2352,
33
  "step": 2000
34
  },
35
  {
36
  "epoch": 0.54,
37
- "learning_rate": 4.104520381116126e-05,
38
- "loss": 1.2375,
39
  "step": 2500
40
  },
41
  {
42
  "epoch": 0.64,
43
- "learning_rate": 3.9254244573393514e-05,
44
- "loss": 1.2305,
45
  "step": 3000
46
  },
47
  {
48
  "epoch": 0.75,
49
- "learning_rate": 3.746328533562576e-05,
50
- "loss": 1.2822,
51
  "step": 3500
52
  },
53
  {
54
  "epoch": 0.86,
55
- "learning_rate": 3.567232609785802e-05,
56
- "loss": 1.2733,
57
  "step": 4000
58
  },
59
  {
60
- "epoch": 0.97,
61
- "learning_rate": 3.388136686009026e-05,
62
- "loss": 1.2598,
63
  "step": 4500
64
  },
65
  {
66
  "epoch": 1.07,
67
- "learning_rate": 3.209040762232252e-05,
68
- "loss": 1.1006,
69
  "step": 5000
70
  },
71
  {
72
  "epoch": 1.18,
73
- "learning_rate": 3.029944838455477e-05,
74
- "loss": 1.0509,
75
  "step": 5500
76
  },
77
  {
78
  "epoch": 1.29,
79
- "learning_rate": 2.850848914678702e-05,
80
- "loss": 1.0549,
81
  "step": 6000
82
  },
83
  {
84
- "epoch": 1.4,
85
- "learning_rate": 2.6717529909019275e-05,
86
- "loss": 1.0917,
87
  "step": 6500
88
  },
89
  {
90
  "epoch": 1.5,
91
- "learning_rate": 2.4926570671251524e-05,
92
- "loss": 1.1086,
93
  "step": 7000
94
  },
95
  {
96
  "epoch": 1.61,
97
- "learning_rate": 2.3135611433483774e-05,
98
- "loss": 1.0815,
99
  "step": 7500
100
  },
101
  {
102
  "epoch": 1.72,
103
- "learning_rate": 2.1344652195716027e-05,
104
- "loss": 1.0995,
105
  "step": 8000
106
  },
107
  {
108
- "epoch": 1.83,
109
- "learning_rate": 1.9553692957948277e-05,
110
- "loss": 1.1132,
111
  "step": 8500
112
  },
113
  {
114
  "epoch": 1.93,
115
- "learning_rate": 1.776273372018053e-05,
116
- "loss": 1.075,
117
  "step": 9000
118
  },
119
  {
120
  "epoch": 2.04,
121
- "learning_rate": 1.5971774482412783e-05,
122
- "loss": 1.054,
123
  "step": 9500
124
  },
125
  {
126
- "epoch": 2.15,
127
- "learning_rate": 1.4180815244645032e-05,
128
- "loss": 0.9583,
129
  "step": 10000
130
  }
131
  ],
132
  "logging_steps": 500,
133
- "max_steps": 13959,
134
  "num_input_tokens_seen": 0,
135
- "num_train_epochs": 3,
136
  "save_steps": 10000,
137
- "total_flos": 2.0902317391872e+16,
138
  "train_batch_size": 8,
139
  "trial_name": null,
140
  "trial_params": null
 
1
  {
2
  "best_metric": null,
3
  "best_model_checkpoint": null,
4
+ "epoch": 2.144082332761578,
5
  "eval_steps": 500,
6
  "global_step": 10000,
7
  "is_hyper_param_search": false,
 
10
  "log_history": [
11
  {
12
  "epoch": 0.11,
13
+ "learning_rate": 4.951270856073601e-05,
14
+ "loss": 1.4381,
15
  "step": 500
16
  },
17
  {
18
  "epoch": 0.21,
19
+ "learning_rate": 4.902541712147201e-05,
20
+ "loss": 1.3041,
21
  "step": 1000
22
  },
23
  {
24
  "epoch": 0.32,
25
+ "learning_rate": 4.853812568220802e-05,
26
+ "loss": 1.2816,
27
  "step": 1500
28
  },
29
  {
30
  "epoch": 0.43,
31
+ "learning_rate": 4.805083424294402e-05,
32
+ "loss": 1.2974,
33
  "step": 2000
34
  },
35
  {
36
  "epoch": 0.54,
37
+ "learning_rate": 4.756354280368003e-05,
38
+ "loss": 1.2876,
39
  "step": 2500
40
  },
41
  {
42
  "epoch": 0.64,
43
+ "learning_rate": 4.707625136441603e-05,
44
+ "loss": 1.3029,
45
  "step": 3000
46
  },
47
  {
48
  "epoch": 0.75,
49
+ "learning_rate": 4.6588959925152037e-05,
50
+ "loss": 1.301,
51
  "step": 3500
52
  },
53
  {
54
  "epoch": 0.86,
55
+ "learning_rate": 4.610166848588804e-05,
56
+ "loss": 1.3153,
57
  "step": 4000
58
  },
59
  {
60
+ "epoch": 0.96,
61
+ "learning_rate": 4.5614377046624046e-05,
62
+ "loss": 1.2989,
63
  "step": 4500
64
  },
65
  {
66
  "epoch": 1.07,
67
+ "learning_rate": 4.512708560736005e-05,
68
+ "loss": 1.1589,
69
  "step": 5000
70
  },
71
  {
72
  "epoch": 1.18,
73
+ "learning_rate": 4.4639794168096056e-05,
74
+ "loss": 1.1053,
75
  "step": 5500
76
  },
77
  {
78
  "epoch": 1.29,
79
+ "learning_rate": 4.415250272883206e-05,
80
+ "loss": 1.1314,
81
  "step": 6000
82
  },
83
  {
84
+ "epoch": 1.39,
85
+ "learning_rate": 4.3665211289568066e-05,
86
+ "loss": 1.1446,
87
  "step": 6500
88
  },
89
  {
90
  "epoch": 1.5,
91
+ "learning_rate": 4.317791985030407e-05,
92
+ "loss": 1.1701,
93
  "step": 7000
94
  },
95
  {
96
  "epoch": 1.61,
97
+ "learning_rate": 4.269062841104008e-05,
98
+ "loss": 1.1702,
99
  "step": 7500
100
  },
101
  {
102
  "epoch": 1.72,
103
+ "learning_rate": 4.220333697177608e-05,
104
+ "loss": 1.1764,
105
  "step": 8000
106
  },
107
  {
108
+ "epoch": 1.82,
109
+ "learning_rate": 4.1716045532512085e-05,
110
+ "loss": 1.1933,
111
  "step": 8500
112
  },
113
  {
114
  "epoch": 1.93,
115
+ "learning_rate": 4.122875409324809e-05,
116
+ "loss": 1.1964,
117
  "step": 9000
118
  },
119
  {
120
  "epoch": 2.04,
121
+ "learning_rate": 4.0741462653984095e-05,
122
+ "loss": 1.1274,
123
  "step": 9500
124
  },
125
  {
126
+ "epoch": 2.14,
127
+ "learning_rate": 4.025417121472011e-05,
128
+ "loss": 1.0048,
129
  "step": 10000
130
  }
131
  ],
132
  "logging_steps": 500,
133
+ "max_steps": 51304,
134
  "num_input_tokens_seen": 0,
135
+ "num_train_epochs": 11,
136
  "save_steps": 10000,
137
+ "total_flos": 2.0901272223744e+16,
138
  "train_batch_size": 8,
139
  "trial_name": null,
140
  "trial_params": null