lapp0 committed · Commit 20fe92b · verified · 1 Parent(s): 4a57b78

End of training

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ benchmarks.shelve.dat filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,181 @@
+ ---
+ base_model: gpt2
+ datasets:
+ - wikimedia/wikipedia
+ library_name: Distily
+ license: mit
+ tags:
+ - bitnet
+ - 1.58b
+ - generated_from_trainer
+ model-index:
+ - name: verify_v0.3.0
+   results: []
+ ---
+
+
+ # Summary
+
+ Distilled with the [Distily](https://github.com/lapp0/distily) library,
+ using teacher model [gpt2](https://huggingface.co/gpt2)
+ on the [wikimedia/wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia) dataset.
+
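A minimal usage sketch for the distilled checkpoint, assuming it is published under the repo id `lapp0/verify_v0.3.0` (inferred from the model name above; the card does not state it):

```python
# Sketch: load and sample from the distilled student with transformers.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "lapp0/verify_v0.3.0"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id, torch_dtype=torch.bfloat16)

inputs = tokenizer("Knowledge distillation is", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```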
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment.
+
+ # Model description
+
+ More information needed
+
+ # Intended uses & limitations
+
+ More information needed
+ -->
+
+ # Model Architecture
+ - **Architecture**: `GPT2LMHeadModel`
+ - **Total Parameters**: 124,439,808
+ - **Data Type (dtype)**: torch.bfloat16
+ - **Model Size**: 0.24 GB
+
+
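The size follows from the parameter count and dtype: 124,439,808 parameters at 2 bytes each (bfloat16) is roughly a quarter of a GB, consistent with the reported size and the ~249 MB `model.safetensors` in this commit. A sketch to reproduce both figures:

```python
# Sketch: reproduce the parameter count and bf16 footprint above.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("gpt2", torch_dtype=torch.bfloat16)
n_params = sum(p.numel() for p in model.parameters())
print(n_params)                  # 124439808
print(n_params * 2 / 1e9, "GB")  # ~0.25 GB at 2 bytes per parameter
```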
+ # Benchmark Metrics Comparison
+
+ | Metric | student (dataset_sample_size=1000) | teacher |
+ | :--- | :--- | :--- |
+ | ai2_arc (acc) | 0.225 | 0.304 |
+ | ai2_arc (acc_norm) | 0.251 | 0.309 |
+ | ai2_arc (acc_norm_stderr) | | 0.01 |
+ | ai2_arc (acc_stderr) | | 0.01 |
+ | arc_challenge (acc) | 0.182 | 0.184 |
+ | arc_challenge (acc_norm) | 0.223 | 0.214 |
+ | arc_challenge (acc_norm_stderr) | | 0.013 |
+ | arc_challenge (acc_stderr) | | 0.012 |
+ | arc_easy (acc) | 0.268 | 0.424 |
+ | arc_easy (acc_norm) | 0.278 | 0.405 |
+ | arc_easy (acc_norm_stderr) | | 0.016 |
+ | arc_easy (acc_stderr) | | 0.016 |
+ | boolq (acc) | 0.375 | 0.541 |
+ | boolq (acc_stderr) | | 0.016 |
+ | cola (mcc) | 0.0 | 0.009 |
+ | cola (mcc_stderr) | | 0.032 |
+ | glue (acc) | 0.477 | 0.41 |
+ | glue (acc_stderr) | | 0.006 |
+ | glue (f1) | 0.0 | 0.526 |
+ | glue (f1_stderr) | | 0.014 |
+ | glue (mcc) | 0.0 | 0.009 |
+ | glue (mcc_stderr) | | 0.032 |
+ | hellaswag (acc) | 0.287 | 0.337 |
+ | hellaswag (acc_norm) | 0.269 | 0.384 |
+ | hellaswag (acc_norm_stderr) | | 0.015 |
+ | hellaswag (acc_stderr) | | 0.015 |
+ | mnli (acc) | 0.335 | 0.323 |
+ | mnli (acc_stderr) | | 0.015 |
+ | mnli_mismatch (acc) | 0.357 | 0.344 |
+ | mnli_mismatch (acc_stderr) | | 0.015 |
+ | mrpc (acc) | 0.316 | 0.515 |
+ | mrpc (acc_stderr) | | 0.025 |
+ | mrpc (f1) | 0.0 | 0.631 |
+ | mrpc (f1_stderr) | | 0.024 |
+ | qnli (acc) | 0.527 | 0.472 |
+ | qnli (acc_stderr) | | 0.016 |
+ | qqp (acc) | 0.673 | 0.34 |
+ | qqp (acc_stderr) | | 0.015 |
+ | qqp (f1) | 0.0 | 0.483 |
+ | qqp (f1_stderr) | | 0.017 |
+ | rte (acc) | 0.527 | 0.516 |
+ | rte (acc_stderr) | | 0.03 |
+ | sst2 (acc) | 0.557 | 0.511 |
+ | sst2 (acc_stderr) | | 0.017 |
+ | wikitext (bits_per_byte) | 1.979 | |
+ | wikitext (byte_perplexity) | 3.942 | |
+ | wikitext (word_perplexity) | 1533.0 | |
+ | wnli (acc) | 0.437 | 0.451 |
+ | wnli (acc_stderr) | | 0.059 |
+
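The task and metric names above (`ai2_arc`, `hellaswag`, `wikitext` with `bits_per_byte`, ...) match EleutherAI's lm-evaluation-harness; assuming that is what produced them (the card does not say), a comparable run might look like:

```python
# Hedged sketch: re-running a subset of the benchmarks above with
# lm-evaluation-harness, assumed (not confirmed) to be their source.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=lapp0/verify_v0.3.0,dtype=bfloat16",  # repo id assumed
    tasks=["arc_easy", "arc_challenge", "hellaswag", "wikitext"],
)
print(results["results"])
```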
+ # Resource Usage Comparison
+
+ - VRAM Use: 7.4923 GB
+
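The card does not say how this figure was measured; one common way to obtain such a number is PyTorch's CUDA allocator statistics after training:

```python
# Sketch: reading peak VRAM use after a training run (one plausible
# way a figure like the above could be measured; the method is assumed).
import torch

peak_bytes = torch.cuda.max_memory_allocated()
print(f"{peak_bytes / 1e9:.4f} GB")
```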
+ # Distillation (Teacher -> Student) Architecture Difference
+
+ - **Architecture**: `GPT2LMHeadModel` -> `GPT2LMHeadModel`
+ - **Total Parameters**: 124,439,808 -> 124,439,808
+ - **Data Type (dtype)**: torch.bfloat16 -> torch.bfloat16
+ - **Model Size**: 0.24 GB -> 0.24 GB
+
+ <details>
+ <summary>Module Diff Details</summary>
+
+ ```diff
+
+ ```
+
+ </details>
+ <br/>
+
+ # Train Dataset
+ Trained on 923,203 tokens from the [wikimedia/wikipedia](https://huggingface.co/datasets/wikimedia/wikipedia) dataset.
+
+ - Num Samples: `990`
+ - Subset: `20231101.en`
+ - Split: `train`
+
+
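A sketch of drawing a comparable sample from the same subset and split; streaming avoids downloading the full dump, and the sample size of 1000 and the `text` column match the hyperparameters listed below:

```python
# Sketch: sample ~1000 articles from the same dataset/subset/split.
from datasets import load_dataset

stream = load_dataset(
    "wikimedia/wikipedia", "20231101.en", split="train", streaming=True
)
sample = [row["text"] for _, row in zip(range(1000), stream)]
print(len(sample))
```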
+ # Training Objective
+
+ ```
+ DistillationObjective(logits_loss_component=LossComponent(label=logits, weight=1, loss_fn=kl))
+ ```
+
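In words: the student is trained on a single loss component, the KL divergence between its output logits and the teacher's, with weight 1. A minimal sketch of such a logits-only KL loss (illustrative; not Distily's actual implementation):

```python
# Sketch of a logits-only KL distillation loss, matching the objective
# described above in spirit; not Distily's actual code.
import torch.nn.functional as F

def kl_logits_loss(student_logits, teacher_logits, temperature=1.0):
    # KL(teacher || student), summed over the vocab dim and averaged
    # over the leading batch dim by reduction="batchmean".
    log_p_student = F.log_softmax(student_logits / temperature, dim=-1)
    p_teacher = F.softmax(teacher_logits / temperature, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * temperature**2
```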
+ # Hyperparameters
+ The following hyperparameters were used during training:
+
+ <details>
+ <summary>Expand</summary>
+
+ - learning_rate: `0.0001`
+ - train_batch_size: `4`
+ - eval_batch_size: `8`
+ - seed: `42`
+ - optimizer: `Adam with betas=(0.9,0.999) and epsilon=1e-08`
+ - lr_scheduler_type: `constant`
+ - lr_scheduler_warmup_ratio: `0.2`
+ - num_epochs: `1.0`
+ - distillation_objective: `DistillationObjective(logits_loss_component=LossComponent(label=logits, weight=1, loss_fn=kl))`
+ - train_embeddings: `True`
+ - lr_scheduler: `<torch.optim.lr_scheduler.LambdaLR object at 0x7ff7e81bb7c0>`
+ - student_model_name_or_path: `None`
+ - student_config_name_or_path: `None`
+ - student_model_config: `None`
+ - reinitialize_weights: `None`
+ - copy_teacher_modules: `[('lm_head', False)]`
+ - student_model_as_bitnet: `True`
+ - student_model_compile: `False`
+ - dropout: `None`
+ - teacher_model_name_or_path: `gpt2`
+ - teacher_load_in_8bit: `False`
+ - teacher_load_in_4bit: `False`
+ - teacher_model_compile: `False`
+ - dataset_uri: `wikimedia/wikipedia`
+ - dataset_subset: `20231101.en`
+ - dataset_split: `train`
+ - dataset_column_name: `text`
+ - dataset_sample_size: `1000`
+ - dataset_test_size: `0.01`
+ - gradient_accumulation_steps: `1`
+ - weight_decay: `0.0`
+ - max_grad_norm: `1.0`
+ - warmup_ratio: `0.2`
+ - warmup_steps: `0`
+ - gradient_checkpointing: `True`
+
+ </details>
+ <br/>
+
+
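`student_model_as_bitnet: True` (and the `bitnet` / `1.58b` tags) indicates the student's linear weights are trained BitNet b1.58-style, i.e. constrained to the ternary set {-1, 0, +1} with a per-tensor scale. A sketch of the absmean ternary quantizer from the BitNet b1.58 paper (assumed, not confirmed, to match Distily's implementation):

```python
# Sketch: BitNet b1.58 absmean weight quantization to {-1, 0, +1},
# per the b1.58 paper; whether Distily follows it exactly is assumed.
import torch

def quantize_ternary(w: torch.Tensor, eps: float = 1e-5):
    scale = w.abs().mean().clamp(min=eps)    # per-tensor absmean scale
    w_q = (w / scale).round().clamp_(-1, 1)  # ternary weights
    return w_q, scale                        # dequantize as w_q * scale
```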
+ # Framework Versions
+ - Distily 0.3.0
+ - Transformers 4.44.2
+ - PyTorch 2.3.0
+ - Datasets 2.21.0
benchmarks.shelve.bak CHANGED
@@ -0,0 +1,2 @@
+ 'teacher', (0, 10615480)
+ 'dataset_sample_size=1000', (21231616, 14412274)
benchmarks.shelve.dat CHANGED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30434f3de6eae04ceb24ef9fe546e50d7541199d28f4655817933141a8dff4dc
+ size 35643890
benchmarks.shelve.dir CHANGED
@@ -0,0 +1,2 @@
+ 'teacher', (0, 10615480)
+ 'dataset_sample_size=1000', (21231616, 14412274)
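The `benchmarks.shelve.{bak,dat,dir}` triple is the on-disk layout of a Python `shelve` database (the `dbm.dumb` backend), keyed by run name. A sketch of reading it locally:

```python
# Sketch: the .bak/.dat/.dir files are Python's shelve (dbm.dumb)
# format; the keys here appear to be run names.
import shelve

with shelve.open("benchmarks.shelve") as db:
    print(list(db.keys()))  # expected: ['teacher', 'dataset_sample_size=1000']
    # each value presumably holds that run's stored benchmark results
```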
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "transformers_version": "4.44.2"
+ }
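This config only pins BOS and EOS to GPT-2's shared `<|endoftext|>` token (id 50256); `generate()` reads it automatically from the checkpoint. For illustration:

```python
# Sketch: the config above expressed via transformers' GenerationConfig.
from transformers import GenerationConfig

gen_cfg = GenerationConfig(bos_token_id=50256, eos_token_id=50256)
# Usable explicitly: model.generate(**inputs, generation_config=gen_cfg)
print(gen_cfg.eos_token_id)  # 50256
```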
logs/dataset_sample_size=1000/events.out.tfevents.1724440642.3cea3f0a07ac ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:87fb870f5c0bae56c8aac6e7ea75e94686e6fa0097890cfceba7899aab146d6d
+ size 121924
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9af3291dd4cbac21613c165049d520fda93cf53370cd1ea5941993f03489236f
+ oid sha256:78285be3f6d4aa0f1b9b8b89207ba60ebc48eb54144d92aeeddc8f6926b42fc8
  size 248894656
tokenizer.json CHANGED
@@ -1,19 +1,7 @@
  {
    "version": "1.0",
-   "truncation": {
-     "direction": "Right",
-     "max_length": 1023,
-     "strategy": "LongestFirst",
-     "stride": 0
-   },
-   "padding": {
-     "strategy": "BatchLongest",
-     "direction": "Right",
-     "pad_to_multiple_of": null,
-     "pad_id": 50256,
-     "pad_type_id": 0,
-     "pad_token": "<|endoftext|>"
-   },
+   "truncation": null,
+   "padding": null,
    "added_tokens": [
      {
        "id": 50256,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6e12706a2446185c3da1e99a8553b154deb540f8d6bf4bb08479b7ee07fb2f74
+ oid sha256:dbb4bc3ee22829abc22c00a091f9e14cf2db309ad905afdb5fa4267bb736b6fc
  size 5304