second commit

Files changed:
- all_results.json +7 -7
- generated_predictions.jsonl +0 -0
- llamaboard_config.yaml +12 -57
- predict_results.json +9 -0
- running_log.txt +89 -553
- trainer_log.jsonl +15 -191
- training_args.yaml +9 -21
all_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
-    "
-    "
-    "
-    "
-    "
-    "
-    "
+    "predict_bleu-4": 83.9079045886076,
+    "predict_rouge-1": 89.87341772151899,
+    "predict_rouge-2": 0.0,
+    "predict_rouge-l": 89.87341772151899,
+    "predict_runtime": 9.6133,
+    "predict_samples_per_second": 130.34,
+    "predict_steps_per_second": 8.218
 }
generated_predictions.jsonl
ADDED
The diff for this file is too large to render; see the raw diff.
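Even when the diff viewer gives up, the first record of the new predictions file can be inspected locally. A minimal sketch, assuming the usual LLaMA-Factory layout in which each line of generated_predictions.jsonl is a JSON object with "prompt", "label", and "predict" fields (the field names are an assumption, not confirmed by this diff):

    import json

    # Peek at the first record of the un-rendered predictions file.
    # Field names are assumed from LLaMA-Factory's typical output layout.
    with open("generated_predictions.jsonl", encoding="utf-8") as f:
        first = json.loads(f.readline())

    for key, value in first.items():
        print(f"{key}: {str(value)[:80]}")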
llamaboard_config.yaml
CHANGED
@@ -1,5 +1,16 @@
+eval.batch_size: 2
+eval.cutoff_len: 1024
+eval.dataset:
+- truth_dev_0716_2
+eval.dataset_dir: data
+eval.max_new_tokens: 512
+eval.max_samples: '100000'
+eval.output_dir: eval_2024-07-16-16-45-32
+eval.predict: true
+eval.temperature: 0.95
+eval.top_p: 0.7
 top.booster: auto
-top.checkpoint_path:
+top.checkpoint_path: train_2024-07-16-15-59-42_llama3_2
 top.finetuning_type: full
 top.model_name: LLaMA3-8B-Chat
 top.quantization_bit: none
@@ -7,59 +18,3 @@ top.quantization_method: bitsandbytes
 top.rope_scaling: none
 top.template: llama3
 top.visual_inputs: false
-train.additional_target: ''
-train.badam_mode: layer
-train.badam_switch_interval: 50
-train.badam_switch_mode: ascending
-train.badam_update_ratio: 0.05
-train.batch_size: 2
-train.compute_type: bf16
-train.create_new_adapter: false
-train.cutoff_len: 1024
-train.dataset:
-- truth_train_0716_2
-train.dataset_dir: data
-train.ds_offload: false
-train.ds_stage: '2'
-train.freeze_extra_modules: ''
-train.freeze_trainable_layers: 2
-train.freeze_trainable_modules: all
-train.galore_rank: 16
-train.galore_scale: 0.25
-train.galore_target: all
-train.galore_update_interval: 200
-train.gradient_accumulation_steps: 8
-train.learning_rate: 5e-6
-train.logging_steps: 1
-train.lora_alpha: 16
-train.lora_dropout: 0
-train.lora_rank: 8
-train.lora_target: ''
-train.loraplus_lr_ratio: 0
-train.lr_scheduler_type: cosine
-train.max_grad_norm: '1.0'
-train.max_samples: '100000'
-train.neat_packing: false
-train.neftune_alpha: 0
-train.num_train_epochs: '5.0'
-train.optim: adamw_torch
-train.packing: false
-train.ppo_score_norm: false
-train.ppo_whiten_rewards: false
-train.pref_beta: 0.1
-train.pref_ftx: 0
-train.pref_loss: sigmoid
-train.report_to: false
-train.resize_vocab: false
-train.reward_model: null
-train.save_steps: 1000
-train.shift_attn: false
-train.training_stage: Supervised Fine-Tuning
-train.use_badam: false
-train.use_dora: false
-train.use_galore: false
-train.use_llama_pro: false
-train.use_pissa: false
-train.use_rslora: false
-train.val_size: 0
-train.warmup_steps: 10
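The removed train.* block is consistent with the training log below: per-device batch size 2 with gradient accumulation 8, across the 8 ranks visible in the log (cuda:0 through cuda:7), gives the total train batch size of 128 that trainer.py reports, and 4,958 examples over 5 epochs give the 190 optimization steps. A quick sketch of that arithmetic; the GPU count is inferred from the log rather than stated in the config, and the floor division happens to match the Trainer's rounding for this run:

    # Sanity-check the batch math reported in running_log.txt.
    # num_gpus = 8 is inferred from the cuda:0 ... cuda:7 rank lines in the log;
    # the other numbers come from llamaboard_config.yaml and the log itself.
    per_device_batch = 2        # train.batch_size
    grad_accum = 8              # train.gradient_accumulation_steps
    num_gpus = 8
    num_examples = 4958         # "Num examples = 4,958"
    num_epochs = 5              # train.num_train_epochs

    total_batch = per_device_batch * grad_accum * num_gpus
    steps_per_epoch = num_examples // total_batch
    total_steps = steps_per_epoch * num_epochs

    print(total_batch)   # 128, matching "Total train batch size ... = 128"
    print(total_steps)   # 190, matching "Total optimization steps = 190"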
predict_results.json
ADDED
@@ -0,0 +1,9 @@
+{
+    "predict_bleu-4": 83.9079045886076,
+    "predict_rouge-1": 89.87341772151899,
+    "predict_rouge-2": 0.0,
+    "predict_rouge-l": 89.87341772151899,
+    "predict_runtime": 9.6133,
+    "predict_samples_per_second": 130.34,
+    "predict_steps_per_second": 8.218
+}
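These scores can in principle be recomputed from generated_predictions.jsonl. A minimal sketch, assuming per-sample sentence BLEU-4 and ROUGE F-scores averaged over the file with "label" and "predict" fields per line; the nltk and rouge packages and the character-level tokenization here stand in for whatever the training framework actually used, so exact agreement is not guaranteed:

    import json

    from nltk.translate.bleu_score import SmoothingFunction, sentence_bleu
    from rouge import Rouge  # pip install nltk rouge

    rouge = Rouge()
    bleu, r1, r2, rl = [], [], [], []

    with open("generated_predictions.jsonl", encoding="utf-8") as f:
        for line in f:
            sample = json.loads(line)
            ref_toks = list(sample["label"])
            hyp_toks = list(sample["predict"]) or [" "]  # rouge rejects empty hypotheses
            bleu.append(sentence_bleu([ref_toks], hyp_toks,
                                      smoothing_function=SmoothingFunction().method3))
            scores = rouge.get_scores(" ".join(hyp_toks), " ".join(ref_toks))[0]
            r1.append(scores["rouge-1"]["f"])
            r2.append(scores["rouge-2"]["f"])
            rl.append(scores["rouge-l"]["f"])

    n = len(bleu)
    for name, vals in [("predict_bleu-4", bleu), ("predict_rouge-1", r1),
                       ("predict_rouge-2", r2), ("predict_rouge-l", rl)]:
        print(name, round(100 * sum(vals) / n, 4))

Incidentally, a predict_rouge-2 of exactly 0.0 next to a rouge-1 near 90 is what you would expect if most references are single tokens (no bigrams to match), which fits a short-answer benchmark like this TruthfulQA-style set.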
running_log.txt
CHANGED
@@ -1,95 +1,73 @@
|
|
1 |
-
07/16/2024 16:
|
2 |
|
3 |
-
07/16/2024 16:
|
4 |
|
5 |
-
07/16/2024 16:
|
6 |
|
7 |
-
07/16/2024 16:
|
8 |
|
9 |
-
07
|
10 |
|
11 |
-
|
12 |
|
13 |
-
|
14 |
|
15 |
-
|
16 |
|
17 |
-
07/16/2024 16:
|
18 |
|
19 |
-
07/16/2024 16:
|
20 |
|
21 |
-
07/16/2024 16:
|
22 |
|
23 |
-
07/16/2024 16:
|
24 |
|
25 |
-
07
|
26 |
|
27 |
-
07
|
28 |
|
29 |
-
07
|
30 |
|
31 |
-
07
|
32 |
|
33 |
-
07
|
34 |
|
35 |
-
07/16/2024 16:
|
36 |
|
37 |
-
07/16/2024 16:
|
38 |
|
39 |
-
07/16/2024 16:
|
40 |
|
41 |
-
07/16/2024 16:
|
42 |
|
43 |
-
07/16/2024 16:
|
44 |
|
45 |
-
07/16/2024 16:
|
46 |
|
47 |
-
07/16/2024 16:
|
48 |
|
49 |
-
07/16/2024 16:
|
50 |
|
51 |
-
07/16/2024 16:
|
52 |
|
53 |
-
07/16/2024 16:
|
54 |
|
55 |
-
07/16/2024 16:
|
56 |
|
57 |
-
07/16/2024 16:
|
58 |
|
59 |
-
|
60 |
|
61 |
-
|
62 |
|
63 |
-
|
64 |
|
65 |
-
|
66 |
|
67 |
-
[
|
68 |
|
69 |
-
[INFO|
|
70 |
-
|
71 |
-
[INFO|template.py:372] 2024-07-16 16:01:00,656 >> Add pad token: <|eot_id|>
|
72 |
-
|
73 |
-
[INFO|loader.py:50] 2024-07-16 16:01:00,657 >> Loading dataset 0716_truthfulqa_benchmark_train_2.json...
|
74 |
-
|
75 |
-
07/16/2024 16:01:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
|
76 |
-
|
77 |
-
07/16/2024 16:01:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
|
78 |
-
|
79 |
-
07/16/2024 16:01:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
|
80 |
-
|
81 |
-
07/16/2024 16:01:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
|
82 |
-
|
83 |
-
07/16/2024 16:01:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
|
84 |
-
|
85 |
-
07/16/2024 16:01:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
|
86 |
-
|
87 |
-
07/16/2024 16:01:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train_2.json...
|
88 |
-
|
89 |
-
[INFO|configuration_utils.py:733] 2024-07-16 16:01:06,083 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Meta-Llama-3-8B-Instruct/snapshots/e1945c40cd546c78e41f1151f4db032b271faeaa/config.json
|
90 |
-
|
91 |
-
[INFO|configuration_utils.py:800] 2024-07-16 16:01:06,086 >> Model config LlamaConfig {
|
92 |
-
"_name_or_path": "meta-llama/Meta-Llama-3-8B-Instruct",
|
93 |
"architectures": [
|
94 |
"LlamaForCausalLM"
|
95 |
],
|
@@ -114,30 +92,46 @@
|
|
114 |
"tie_word_embeddings": false,
|
115 |
"torch_dtype": "bfloat16",
|
116 |
"transformers_version": "4.42.3",
|
117 |
-
"use_cache":
|
118 |
"vocab_size": 128256
|
119 |
}
|
120 |
|
121 |
|
122 |
-
[INFO|
|
|
|
|
|
123 |
|
124 |
-
[INFO|modeling_utils.py:1531] 2024-07-16 16:
|
125 |
|
126 |
-
[INFO|configuration_utils.py:1000] 2024-07-16 16:
|
127 |
"bos_token_id": 128000,
|
128 |
"eos_token_id": 128009
|
129 |
}
|
130 |
|
131 |
|
132 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
133 |
|
|
|
134 |
|
135 |
-
|
|
|
|
|
|
|
|
|
|
|
136 |
If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
|
137 |
|
138 |
-
[INFO|configuration_utils.py:
|
139 |
|
140 |
-
[INFO|configuration_utils.py:1000] 2024-07-16 16:
|
141 |
"bos_token_id": 128000,
|
142 |
"do_sample": true,
|
143 |
"eos_token_id": [
|
@@ -150,518 +144,60 @@ If your task is similar to the task the model of the checkpoint was trained on,
|
|
150 |
}
|
151 |
|
152 |
|
153 |
-
[INFO|
|
154 |
-
|
155 |
-
[INFO|attention.py:80] 2024-07-16 16:01:10,211 >> Using torch SDPA for faster training and inference.
|
156 |
-
|
157 |
-
[INFO|adapter.py:302] 2024-07-16 16:01:10,212 >> Upcasting trainable params to float32.
|
158 |
-
|
159 |
-
[INFO|adapter.py:48] 2024-07-16 16:01:10,212 >> Fine-tuning method: Full
|
160 |
-
|
161 |
-
[INFO|loader.py:196] 2024-07-16 16:01:10,254 >> trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
|
162 |
-
|
163 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
|
164 |
-
|
165 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
166 |
-
|
167 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
|
168 |
-
|
169 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
|
170 |
-
|
171 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
|
172 |
-
|
173 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
174 |
-
|
175 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
|
176 |
-
|
177 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
|
178 |
-
|
179 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
|
180 |
-
|
181 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
182 |
-
|
183 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
|
184 |
-
|
185 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
|
186 |
-
|
187 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
|
188 |
-
|
189 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
|
190 |
-
|
191 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
|
192 |
-
|
193 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
194 |
-
|
195 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
|
196 |
-
|
197 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
|
198 |
-
|
199 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
|
200 |
-
|
201 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
|
202 |
-
|
203 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
204 |
-
|
205 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
|
206 |
-
|
207 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
|
208 |
-
|
209 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
|
210 |
-
|
211 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
|
212 |
-
|
213 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
|
214 |
-
|
215 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
216 |
-
|
217 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
|
218 |
-
|
219 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
|
220 |
-
|
221 |
-
[INFO|trainer.py:642] 2024-07-16 16:01:10,260 >> Using auto half precision backend
|
222 |
-
|
223 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
|
224 |
-
|
225 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
|
226 |
-
|
227 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
228 |
-
|
229 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
|
230 |
-
|
231 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
|
232 |
-
|
233 |
-
07/16/2024 16:01:10 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
|
234 |
-
|
235 |
-
[INFO|trainer.py:2128] 2024-07-16 16:01:34,721 >> ***** Running training *****
|
236 |
-
|
237 |
-
[INFO|trainer.py:2129] 2024-07-16 16:01:34,721 >> Num examples = 4,958
|
238 |
-
|
239 |
-
[INFO|trainer.py:2130] 2024-07-16 16:01:34,721 >> Num Epochs = 5
|
240 |
-
|
241 |
-
[INFO|trainer.py:2131] 2024-07-16 16:01:34,721 >> Instantaneous batch size per device = 2
|
242 |
-
|
243 |
-
[INFO|trainer.py:2134] 2024-07-16 16:01:34,721 >> Total train batch size (w. parallel, distributed & accumulation) = 128
|
244 |
-
|
245 |
-
[INFO|trainer.py:2135] 2024-07-16 16:01:34,721 >> Gradient Accumulation steps = 8
|
246 |
-
|
247 |
-
[INFO|trainer.py:2136] 2024-07-16 16:01:34,721 >> Total optimization steps = 190
|
248 |
-
|
249 |
-
[INFO|trainer.py:2137] 2024-07-16 16:01:34,722 >> Number of trainable parameters = 8,030,261,248
|
250 |
-
|
251 |
-
[INFO|callbacks.py:310] 2024-07-16 16:01:59,318 >> {'loss': 13.7821, 'learning_rate': 5.0000e-07, 'epoch': 0.03, 'throughput': 260.88}
|
252 |
-
|
253 |
-
[INFO|callbacks.py:310] 2024-07-16 16:02:12,483 >> {'loss': 13.6363, 'learning_rate': 1.0000e-06, 'epoch': 0.05, 'throughput': 344.08}
|
254 |
-
|
255 |
-
[INFO|callbacks.py:310] 2024-07-16 16:02:25,669 >> {'loss': 13.6033, 'learning_rate': 1.5000e-06, 'epoch': 0.08, 'throughput': 382.21}
|
256 |
-
|
257 |
-
[INFO|callbacks.py:310] 2024-07-16 16:02:38,846 >> {'loss': 12.5696, 'learning_rate': 2.0000e-06, 'epoch': 0.10, 'throughput': 404.48}
|
258 |
-
|
259 |
-
[INFO|callbacks.py:310] 2024-07-16 16:02:52,032 >> {'loss': 9.3589, 'learning_rate': 2.5000e-06, 'epoch': 0.13, 'throughput': 417.45}
|
260 |
-
|
261 |
-
[INFO|callbacks.py:310] 2024-07-16 16:03:05,202 >> {'loss': 6.7715, 'learning_rate': 3.0000e-06, 'epoch': 0.15, 'throughput': 427.06}
|
262 |
-
|
263 |
-
[INFO|callbacks.py:310] 2024-07-16 16:03:18,375 >> {'loss': 5.3541, 'learning_rate': 3.5000e-06, 'epoch': 0.18, 'throughput': 434.07}
|
264 |
-
|
265 |
-
[INFO|callbacks.py:310] 2024-07-16 16:03:31,565 >> {'loss': 1.9295, 'learning_rate': 4.0000e-06, 'epoch': 0.21, 'throughput': 439.30}
|
266 |
-
|
267 |
-
[INFO|callbacks.py:310] 2024-07-16 16:03:44,763 >> {'loss': 0.6328, 'learning_rate': 4.5000e-06, 'epoch': 0.23, 'throughput': 441.47}
|
268 |
-
|
269 |
-
[INFO|callbacks.py:310] 2024-07-16 16:03:57,936 >> {'loss': 3.3225, 'learning_rate': 5.0000e-06, 'epoch': 0.26, 'throughput': 444.77}
|
270 |
-
|
271 |
-
[INFO|callbacks.py:310] 2024-07-16 16:04:11,125 >> {'loss': 0.2598, 'learning_rate': 4.9996e-06, 'epoch': 0.28, 'throughput': 447.47}
|
272 |
-
|
273 |
-
[INFO|callbacks.py:310] 2024-07-16 16:04:24,294 >> {'loss': 0.6874, 'learning_rate': 4.9985e-06, 'epoch': 0.31, 'throughput': 450.46}
|
274 |
-
|
275 |
-
[INFO|callbacks.py:310] 2024-07-16 16:04:37,477 >> {'loss': 2.0329, 'learning_rate': 4.9966e-06, 'epoch': 0.34, 'throughput': 452.54}
|
276 |
-
|
277 |
-
[INFO|callbacks.py:310] 2024-07-16 16:04:50,644 >> {'loss': 0.4942, 'learning_rate': 4.9939e-06, 'epoch': 0.36, 'throughput': 454.14}
|
278 |
-
|
279 |
-
[INFO|callbacks.py:310] 2024-07-16 16:05:03,824 >> {'loss': 1.1786, 'learning_rate': 4.9905e-06, 'epoch': 0.39, 'throughput': 456.59}
|
280 |
-
|
281 |
-
[INFO|callbacks.py:310] 2024-07-16 16:05:17,004 >> {'loss': 0.4424, 'learning_rate': 4.9863e-06, 'epoch': 0.41, 'throughput': 458.45}
|
282 |
-
|
283 |
-
[INFO|callbacks.py:310] 2024-07-16 16:05:30,183 >> {'loss': 0.3336, 'learning_rate': 4.9814e-06, 'epoch': 0.44, 'throughput': 459.83}
|
284 |
-
|
285 |
-
[INFO|callbacks.py:310] 2024-07-16 16:05:43,368 >> {'loss': 0.2568, 'learning_rate': 4.9757e-06, 'epoch': 0.46, 'throughput': 460.35}
|
286 |
-
|
287 |
-
[INFO|callbacks.py:310] 2024-07-16 16:05:56,545 >> {'loss': 0.1889, 'learning_rate': 4.9692e-06, 'epoch': 0.49, 'throughput': 461.44}
|
288 |
-
|
289 |
-
[INFO|callbacks.py:310] 2024-07-16 16:06:09,717 >> {'loss': 0.1974, 'learning_rate': 4.9620e-06, 'epoch': 0.52, 'throughput': 462.27}
|
290 |
-
|
291 |
-
[INFO|callbacks.py:310] 2024-07-16 16:06:22,902 >> {'loss': 0.1766, 'learning_rate': 4.9541e-06, 'epoch': 0.54, 'throughput': 463.99}
|
292 |
-
|
293 |
-
[INFO|callbacks.py:310] 2024-07-16 16:06:36,060 >> {'loss': 0.1694, 'learning_rate': 4.9454e-06, 'epoch': 0.57, 'throughput': 464.28}
|
294 |
-
|
295 |
-
[INFO|callbacks.py:310] 2024-07-16 16:06:49,233 >> {'loss': 0.1374, 'learning_rate': 4.9359e-06, 'epoch': 0.59, 'throughput': 465.03}
|
296 |
-
|
297 |
-
[INFO|callbacks.py:310] 2024-07-16 16:07:02,410 >> {'loss': 0.1496, 'learning_rate': 4.9257e-06, 'epoch': 0.62, 'throughput': 466.25}
|
298 |
-
|
299 |
-
[INFO|callbacks.py:310] 2024-07-16 16:07:15,596 >> {'loss': 0.1554, 'learning_rate': 4.9148e-06, 'epoch': 0.65, 'throughput': 466.38}
|
300 |
-
|
301 |
-
[INFO|callbacks.py:310] 2024-07-16 16:07:28,752 >> {'loss': 0.0918, 'learning_rate': 4.9032e-06, 'epoch': 0.67, 'throughput': 466.86}
|
302 |
-
|
303 |
-
[INFO|callbacks.py:310] 2024-07-16 16:07:41,916 >> {'loss': 0.1062, 'learning_rate': 4.8908e-06, 'epoch': 0.70, 'throughput': 467.90}
|
304 |
-
|
305 |
-
[INFO|callbacks.py:310] 2024-07-16 16:07:55,082 >> {'loss': 0.1975, 'learning_rate': 4.8776e-06, 'epoch': 0.72, 'throughput': 468.27}
|
306 |
-
|
307 |
-
[INFO|callbacks.py:310] 2024-07-16 16:08:08,244 >> {'loss': 0.1389, 'learning_rate': 4.8638e-06, 'epoch': 0.75, 'throughput': 468.71}
|
308 |
-
|
309 |
-
[INFO|callbacks.py:310] 2024-07-16 16:08:21,418 >> {'loss': 0.1382, 'learning_rate': 4.8492e-06, 'epoch': 0.77, 'throughput': 469.38}
|
310 |
-
|
311 |
-
[INFO|callbacks.py:310] 2024-07-16 16:08:34,599 >> {'loss': 0.1982, 'learning_rate': 4.8340e-06, 'epoch': 0.80, 'throughput': 469.24}
|
312 |
-
|
313 |
-
[INFO|callbacks.py:310] 2024-07-16 16:08:47,765 >> {'loss': 0.1072, 'learning_rate': 4.8180e-06, 'epoch': 0.83, 'throughput': 469.35}
|
314 |
-
|
315 |
-
[INFO|callbacks.py:310] 2024-07-16 16:09:00,929 >> {'loss': 0.0757, 'learning_rate': 4.8013e-06, 'epoch': 0.85, 'throughput': 470.10}
|
316 |
-
|
317 |
-
[INFO|callbacks.py:310] 2024-07-16 16:09:14,099 >> {'loss': 0.0829, 'learning_rate': 4.7839e-06, 'epoch': 0.88, 'throughput': 470.13}
|
318 |
-
|
319 |
-
[INFO|callbacks.py:310] 2024-07-16 16:09:27,270 >> {'loss': 0.1017, 'learning_rate': 4.7658e-06, 'epoch': 0.90, 'throughput': 470.20}
|
320 |
-
|
321 |
-
[INFO|callbacks.py:310] 2024-07-16 16:09:40,431 >> {'loss': 0.0957, 'learning_rate': 4.7470e-06, 'epoch': 0.93, 'throughput': 470.47}
|
322 |
-
|
323 |
-
[INFO|callbacks.py:310] 2024-07-16 16:09:53,603 >> {'loss': 0.0999, 'learning_rate': 4.7275e-06, 'epoch': 0.95, 'throughput': 471.30}
|
324 |
-
|
325 |
-
[INFO|callbacks.py:310] 2024-07-16 16:10:06,768 >> {'loss': 0.0581, 'learning_rate': 4.7074e-06, 'epoch': 0.98, 'throughput': 471.80}
|
326 |
-
|
327 |
-
[INFO|callbacks.py:310] 2024-07-16 16:10:19,934 >> {'loss': 0.0923, 'learning_rate': 4.6865e-06, 'epoch': 1.01, 'throughput': 472.34}
|
328 |
-
|
329 |
-
[INFO|callbacks.py:310] 2024-07-16 16:10:33,082 >> {'loss': 0.0506, 'learning_rate': 4.6651e-06, 'epoch': 1.03, 'throughput': 472.58}
|
330 |
-
|
331 |
-
[INFO|callbacks.py:310] 2024-07-16 16:10:46,249 >> {'loss': 0.0333, 'learning_rate': 4.6429e-06, 'epoch': 1.06, 'throughput': 472.58}
|
332 |
-
|
333 |
-
[INFO|callbacks.py:310] 2024-07-16 16:10:59,408 >> {'loss': 0.0380, 'learning_rate': 4.6201e-06, 'epoch': 1.08, 'throughput': 472.96}
|
334 |
-
|
335 |
-
[INFO|callbacks.py:310] 2024-07-16 16:11:12,564 >> {'loss': 0.0416, 'learning_rate': 4.5967e-06, 'epoch': 1.11, 'throughput': 473.13}
|
336 |
-
|
337 |
-
[INFO|callbacks.py:310] 2024-07-16 16:11:25,735 >> {'loss': 0.1068, 'learning_rate': 4.5726e-06, 'epoch': 1.14, 'throughput': 473.22}
|
338 |
-
|
339 |
-
[INFO|callbacks.py:310] 2024-07-16 16:11:38,904 >> {'loss': 0.0369, 'learning_rate': 4.5479e-06, 'epoch': 1.16, 'throughput': 473.32}
|
340 |
-
|
341 |
-
[INFO|callbacks.py:310] 2024-07-16 16:11:52,063 >> {'loss': 0.1703, 'learning_rate': 4.5225e-06, 'epoch': 1.19, 'throughput': 473.49}
|
342 |
-
|
343 |
-
[INFO|callbacks.py:310] 2024-07-16 16:12:05,226 >> {'loss': 0.1102, 'learning_rate': 4.4966e-06, 'epoch': 1.21, 'throughput': 473.60}
|
344 |
-
|
345 |
-
[INFO|callbacks.py:310] 2024-07-16 16:12:18,398 >> {'loss': 0.0595, 'learning_rate': 4.4700e-06, 'epoch': 1.24, 'throughput': 473.71}
|
346 |
-
|
347 |
-
[INFO|callbacks.py:310] 2024-07-16 16:12:31,565 >> {'loss': 0.1009, 'learning_rate': 4.4429e-06, 'epoch': 1.26, 'throughput': 473.98}
|
348 |
-
|
349 |
-
[INFO|callbacks.py:310] 2024-07-16 16:12:44,725 >> {'loss': 0.0434, 'learning_rate': 4.4151e-06, 'epoch': 1.29, 'throughput': 474.12}
|
350 |
-
|
351 |
-
[INFO|callbacks.py:310] 2024-07-16 16:12:57,869 >> {'loss': 0.0281, 'learning_rate': 4.3868e-06, 'epoch': 1.32, 'throughput': 474.46}
|
352 |
-
|
353 |
-
[INFO|callbacks.py:310] 2024-07-16 16:13:11,049 >> {'loss': 0.0513, 'learning_rate': 4.3579e-06, 'epoch': 1.34, 'throughput': 474.35}
|
354 |
-
|
355 |
-
[INFO|callbacks.py:310] 2024-07-16 16:13:24,229 >> {'loss': 0.0902, 'learning_rate': 4.3284e-06, 'epoch': 1.37, 'throughput': 474.43}
|
356 |
-
|
357 |
-
[INFO|callbacks.py:310] 2024-07-16 16:13:37,389 >> {'loss': 0.0448, 'learning_rate': 4.2983e-06, 'epoch': 1.39, 'throughput': 474.55}
|
358 |
-
|
359 |
-
[INFO|callbacks.py:310] 2024-07-16 16:13:50,556 >> {'loss': 0.0360, 'learning_rate': 4.2678e-06, 'epoch': 1.42, 'throughput': 474.98}
|
360 |
-
|
361 |
-
[INFO|callbacks.py:310] 2024-07-16 16:14:03,729 >> {'loss': 0.0279, 'learning_rate': 4.2366e-06, 'epoch': 1.45, 'throughput': 475.04}
|
362 |
-
|
363 |
-
[INFO|callbacks.py:310] 2024-07-16 16:14:16,913 >> {'loss': 0.0527, 'learning_rate': 4.2050e-06, 'epoch': 1.47, 'throughput': 475.14}
|
364 |
-
|
365 |
-
[INFO|callbacks.py:310] 2024-07-16 16:14:30,074 >> {'loss': 0.0466, 'learning_rate': 4.1728e-06, 'epoch': 1.50, 'throughput': 475.66}
|
366 |
-
|
367 |
-
[INFO|callbacks.py:310] 2024-07-16 16:14:43,261 >> {'loss': 0.0203, 'learning_rate': 4.1401e-06, 'epoch': 1.52, 'throughput': 475.90}
|
368 |
-
|
369 |
-
[INFO|callbacks.py:310] 2024-07-16 16:14:56,437 >> {'loss': 0.0693, 'learning_rate': 4.1070e-06, 'epoch': 1.55, 'throughput': 475.74}
|
370 |
-
|
371 |
-
[INFO|callbacks.py:310] 2024-07-16 16:15:09,625 >> {'loss': 0.0193, 'learning_rate': 4.0733e-06, 'epoch': 1.57, 'throughput': 475.58}
|
372 |
-
|
373 |
-
[INFO|callbacks.py:310] 2024-07-16 16:15:22,819 >> {'loss': 0.1155, 'learning_rate': 4.0392e-06, 'epoch': 1.60, 'throughput': 475.95}
|
374 |
-
|
375 |
-
[INFO|callbacks.py:310] 2024-07-16 16:15:36,030 >> {'loss': 0.0594, 'learning_rate': 4.0045e-06, 'epoch': 1.63, 'throughput': 476.06}
|
376 |
-
|
377 |
-
[INFO|callbacks.py:310] 2024-07-16 16:15:49,202 >> {'loss': 0.0391, 'learning_rate': 3.9695e-06, 'epoch': 1.65, 'throughput': 476.02}
|
378 |
-
|
379 |
-
[INFO|callbacks.py:310] 2024-07-16 16:16:02,414 >> {'loss': 0.0552, 'learning_rate': 3.9339e-06, 'epoch': 1.68, 'throughput': 476.02}
|
380 |
-
|
381 |
-
[INFO|callbacks.py:310] 2024-07-16 16:16:15,619 >> {'loss': 0.0300, 'learning_rate': 3.8980e-06, 'epoch': 1.70, 'throughput': 476.12}
|
382 |
-
|
383 |
-
[INFO|callbacks.py:310] 2024-07-16 16:16:28,843 >> {'loss': 0.0458, 'learning_rate': 3.8616e-06, 'epoch': 1.73, 'throughput': 476.36}
|
384 |
-
|
385 |
-
[INFO|callbacks.py:310] 2024-07-16 16:16:42,017 >> {'loss': 0.0502, 'learning_rate': 3.8248e-06, 'epoch': 1.75, 'throughput': 476.58}
|
386 |
-
|
387 |
-
[INFO|callbacks.py:310] 2024-07-16 16:16:55,194 >> {'loss': 0.0513, 'learning_rate': 3.7876e-06, 'epoch': 1.78, 'throughput': 476.59}
|
388 |
-
|
389 |
-
[INFO|callbacks.py:310] 2024-07-16 16:17:08,356 >> {'loss': 0.0309, 'learning_rate': 3.7500e-06, 'epoch': 1.81, 'throughput': 476.93}
|
390 |
-
|
391 |
-
[INFO|callbacks.py:310] 2024-07-16 16:17:21,528 >> {'loss': 0.0889, 'learning_rate': 3.7120e-06, 'epoch': 1.83, 'throughput': 476.99}
|
392 |
-
|
393 |
-
[INFO|callbacks.py:310] 2024-07-16 16:17:34,696 >> {'loss': 0.0868, 'learning_rate': 3.6737e-06, 'epoch': 1.86, 'throughput': 476.95}
|
394 |
-
|
395 |
-
[INFO|callbacks.py:310] 2024-07-16 16:17:47,854 >> {'loss': 0.0516, 'learning_rate': 3.6350e-06, 'epoch': 1.88, 'throughput': 476.96}
|
396 |
-
|
397 |
-
[INFO|callbacks.py:310] 2024-07-16 16:18:01,019 >> {'loss': 0.0590, 'learning_rate': 3.5959e-06, 'epoch': 1.91, 'throughput': 477.28}
|
398 |
-
|
399 |
-
[INFO|callbacks.py:310] 2024-07-16 16:18:14,190 >> {'loss': 0.0475, 'learning_rate': 3.5565e-06, 'epoch': 1.94, 'throughput': 477.42}
|
400 |
-
|
401 |
-
[INFO|callbacks.py:310] 2024-07-16 16:18:27,351 >> {'loss': 0.0704, 'learning_rate': 3.5168e-06, 'epoch': 1.96, 'throughput': 477.51}
|
402 |
-
|
403 |
-
[INFO|callbacks.py:310] 2024-07-16 16:18:40,532 >> {'loss': 0.0666, 'learning_rate': 3.4768e-06, 'epoch': 1.99, 'throughput': 477.44}
|
404 |
-
|
405 |
-
[INFO|callbacks.py:310] 2024-07-16 16:18:53,697 >> {'loss': 0.0275, 'learning_rate': 3.4365e-06, 'epoch': 2.01, 'throughput': 477.39}
|
406 |
-
|
407 |
-
[INFO|callbacks.py:310] 2024-07-16 16:19:06,863 >> {'loss': 0.0169, 'learning_rate': 3.3959e-06, 'epoch': 2.04, 'throughput': 477.49}
|
408 |
-
|
409 |
-
[INFO|callbacks.py:310] 2024-07-16 16:19:20,024 >> {'loss': 0.0056, 'learning_rate': 3.3551e-06, 'epoch': 2.06, 'throughput': 477.79}
|
410 |
-
|
411 |
-
[INFO|callbacks.py:310] 2024-07-16 16:19:33,190 >> {'loss': 0.0139, 'learning_rate': 3.3139e-06, 'epoch': 2.09, 'throughput': 477.73}
|
412 |
-
|
413 |
-
[INFO|callbacks.py:310] 2024-07-16 16:19:46,365 >> {'loss': 0.0561, 'learning_rate': 3.2725e-06, 'epoch': 2.12, 'throughput': 477.99}
|
414 |
-
|
415 |
-
[INFO|callbacks.py:310] 2024-07-16 16:19:59,530 >> {'loss': 0.0098, 'learning_rate': 3.2309e-06, 'epoch': 2.14, 'throughput': 478.07}
|
416 |
-
|
417 |
-
[INFO|callbacks.py:310] 2024-07-16 16:20:12,693 >> {'loss': 0.0037, 'learning_rate': 3.1891e-06, 'epoch': 2.17, 'throughput': 478.10}
|
418 |
-
|
419 |
-
[INFO|callbacks.py:310] 2024-07-16 16:20:25,850 >> {'loss': 0.0194, 'learning_rate': 3.1470e-06, 'epoch': 2.19, 'throughput': 478.31}
|
420 |
-
|
421 |
-
[INFO|callbacks.py:310] 2024-07-16 16:20:39,012 >> {'loss': 0.0004, 'learning_rate': 3.1048e-06, 'epoch': 2.22, 'throughput': 478.31}
|
422 |
-
|
423 |
-
[INFO|callbacks.py:310] 2024-07-16 16:20:52,174 >> {'loss': 0.0003, 'learning_rate': 3.0624e-06, 'epoch': 2.25, 'throughput': 478.43}
|
424 |
-
|
425 |
-
[INFO|callbacks.py:310] 2024-07-16 16:21:05,338 >> {'loss': 0.0511, 'learning_rate': 3.0198e-06, 'epoch': 2.27, 'throughput': 478.42}
|
426 |
-
|
427 |
-
[INFO|callbacks.py:310] 2024-07-16 16:21:18,503 >> {'loss': 0.0974, 'learning_rate': 2.9770e-06, 'epoch': 2.30, 'throughput': 478.66}
|
428 |
-
|
429 |
-
[INFO|callbacks.py:310] 2024-07-16 16:21:31,670 >> {'loss': 0.0442, 'learning_rate': 2.9341e-06, 'epoch': 2.32, 'throughput': 478.60}
|
430 |
-
|
431 |
-
[INFO|callbacks.py:310] 2024-07-16 16:21:44,838 >> {'loss': 0.0802, 'learning_rate': 2.8911e-06, 'epoch': 2.35, 'throughput': 478.49}
|
432 |
-
|
433 |
-
[INFO|callbacks.py:310] 2024-07-16 16:21:58,015 >> {'loss': 0.0195, 'learning_rate': 2.8479e-06, 'epoch': 2.37, 'throughput': 478.66}
|
434 |
-
|
435 |
-
[INFO|callbacks.py:310] 2024-07-16 16:22:11,180 >> {'loss': 0.0550, 'learning_rate': 2.8047e-06, 'epoch': 2.40, 'throughput': 478.62}
|
436 |
-
|
437 |
-
[INFO|callbacks.py:310] 2024-07-16 16:22:24,345 >> {'loss': 0.0268, 'learning_rate': 2.7613e-06, 'epoch': 2.43, 'throughput': 478.66}
|
438 |
-
|
439 |
-
[INFO|callbacks.py:310] 2024-07-16 16:22:37,492 >> {'loss': 0.0196, 'learning_rate': 2.7179e-06, 'epoch': 2.45, 'throughput': 478.71}
|
440 |
-
|
441 |
-
[INFO|callbacks.py:310] 2024-07-16 16:22:50,668 >> {'loss': 0.0363, 'learning_rate': 2.6744e-06, 'epoch': 2.48, 'throughput': 478.69}
|
442 |
-
|
443 |
-
[INFO|callbacks.py:310] 2024-07-16 16:23:03,834 >> {'loss': 0.0046, 'learning_rate': 2.6308e-06, 'epoch': 2.50, 'throughput': 478.64}
|
444 |
-
|
445 |
-
[INFO|callbacks.py:310] 2024-07-16 16:23:16,992 >> {'loss': 0.0366, 'learning_rate': 2.5872e-06, 'epoch': 2.53, 'throughput': 478.64}
|
446 |
-
|
447 |
-
[INFO|callbacks.py:310] 2024-07-16 16:23:30,144 >> {'loss': 0.0051, 'learning_rate': 2.5436e-06, 'epoch': 2.55, 'throughput': 478.64}
|
448 |
-
|
449 |
-
[INFO|callbacks.py:310] 2024-07-16 16:23:43,301 >> {'loss': 0.0226, 'learning_rate': 2.5000e-06, 'epoch': 2.58, 'throughput': 478.82}
|
450 |
-
|
451 |
-
[INFO|callbacks.py:310] 2024-07-16 16:23:56,469 >> {'loss': 0.0818, 'learning_rate': 2.4564e-06, 'epoch': 2.61, 'throughput': 478.85}
|
452 |
-
|
453 |
-
[INFO|callbacks.py:310] 2024-07-16 16:24:09,632 >> {'loss': 0.0247, 'learning_rate': 2.4128e-06, 'epoch': 2.63, 'throughput': 478.93}
|
454 |
-
|
455 |
-
[INFO|callbacks.py:310] 2024-07-16 16:24:22,814 >> {'loss': 0.0593, 'learning_rate': 2.3692e-06, 'epoch': 2.66, 'throughput': 478.83}
|
456 |
-
|
457 |
-
[INFO|callbacks.py:310] 2024-07-16 16:24:35,972 >> {'loss': 0.0073, 'learning_rate': 2.3256e-06, 'epoch': 2.68, 'throughput': 479.05}
|
458 |
-
|
459 |
-
[INFO|callbacks.py:310] 2024-07-16 16:24:49,129 >> {'loss': 0.0295, 'learning_rate': 2.2821e-06, 'epoch': 2.71, 'throughput': 479.07}
|
460 |
-
|
461 |
-
[INFO|callbacks.py:310] 2024-07-16 16:25:02,310 >> {'loss': 0.0115, 'learning_rate': 2.2387e-06, 'epoch': 2.74, 'throughput': 478.96}
|
462 |
-
|
463 |
-
[INFO|callbacks.py:310] 2024-07-16 16:25:15,470 >> {'loss': 0.0064, 'learning_rate': 2.1953e-06, 'epoch': 2.76, 'throughput': 478.95}
|
464 |
-
|
465 |
-
[INFO|callbacks.py:310] 2024-07-16 16:25:28,641 >> {'loss': 0.0229, 'learning_rate': 2.1521e-06, 'epoch': 2.79, 'throughput': 478.89}
|
466 |
-
|
467 |
-
[INFO|callbacks.py:310] 2024-07-16 16:25:41,814 >> {'loss': 0.0605, 'learning_rate': 2.1089e-06, 'epoch': 2.81, 'throughput': 478.89}
|
468 |
-
|
469 |
-
[INFO|callbacks.py:310] 2024-07-16 16:25:54,992 >> {'loss': 0.0500, 'learning_rate': 2.0659e-06, 'epoch': 2.84, 'throughput': 478.95}
|
470 |
-
|
471 |
-
[INFO|callbacks.py:310] 2024-07-16 16:26:08,163 >> {'loss': 0.0544, 'learning_rate': 2.0230e-06, 'epoch': 2.86, 'throughput': 478.93}
|
472 |
-
|
473 |
-
[INFO|callbacks.py:310] 2024-07-16 16:26:21,350 >> {'loss': 0.0109, 'learning_rate': 1.9802e-06, 'epoch': 2.89, 'throughput': 478.90}
|
474 |
-
|
475 |
-
[INFO|callbacks.py:310] 2024-07-16 16:26:34,506 >> {'loss': 0.0242, 'learning_rate': 1.9376e-06, 'epoch': 2.92, 'throughput': 478.86}
|
476 |
-
|
477 |
-
[INFO|callbacks.py:310] 2024-07-16 16:26:47,673 >> {'loss': 0.0223, 'learning_rate': 1.8952e-06, 'epoch': 2.94, 'throughput': 479.09}
|
478 |
-
|
479 |
-
[INFO|callbacks.py:310] 2024-07-16 16:27:00,837 >> {'loss': 0.0263, 'learning_rate': 1.8530e-06, 'epoch': 2.97, 'throughput': 479.20}
|
480 |
-
|
481 |
-
[INFO|callbacks.py:310] 2024-07-16 16:27:13,992 >> {'loss': 0.0014, 'learning_rate': 1.8109e-06, 'epoch': 2.99, 'throughput': 479.12}
|
482 |
-
|
483 |
-
[INFO|callbacks.py:310] 2024-07-16 16:27:27,158 >> {'loss': 0.0061, 'learning_rate': 1.7691e-06, 'epoch': 3.02, 'throughput': 479.09}
|
484 |
-
|
485 |
-
[INFO|callbacks.py:310] 2024-07-16 16:27:40,327 >> {'loss': 0.0296, 'learning_rate': 1.7275e-06, 'epoch': 3.05, 'throughput': 479.08}
|
486 |
-
|
487 |
-
[INFO|callbacks.py:310] 2024-07-16 16:27:53,500 >> {'loss': 0.0186, 'learning_rate': 1.6861e-06, 'epoch': 3.07, 'throughput': 479.11}
|
488 |
-
|
489 |
-
[INFO|callbacks.py:310] 2024-07-16 16:28:06,669 >> {'loss': 0.0038, 'learning_rate': 1.6449e-06, 'epoch': 3.10, 'throughput': 478.93}
|
490 |
-
|
491 |
-
[INFO|callbacks.py:310] 2024-07-16 16:28:19,843 >> {'loss': 0.0033, 'learning_rate': 1.6041e-06, 'epoch': 3.12, 'throughput': 478.90}
|
492 |
-
|
493 |
-
[INFO|callbacks.py:310] 2024-07-16 16:28:33,006 >> {'loss': 0.0091, 'learning_rate': 1.5635e-06, 'epoch': 3.15, 'throughput': 478.92}
|
494 |
-
|
495 |
-
[INFO|callbacks.py:310] 2024-07-16 16:28:46,166 >> {'loss': 0.0012, 'learning_rate': 1.5232e-06, 'epoch': 3.17, 'throughput': 478.94}
|
496 |
-
|
497 |
-
[INFO|callbacks.py:310] 2024-07-16 16:28:59,319 >> {'loss': 0.0223, 'learning_rate': 1.4832e-06, 'epoch': 3.20, 'throughput': 479.08}
|
498 |
-
|
499 |
-
[INFO|callbacks.py:310] 2024-07-16 16:29:12,500 >> {'loss': 0.0131, 'learning_rate': 1.4435e-06, 'epoch': 3.23, 'throughput': 479.02}
|
500 |
-
|
501 |
-
[INFO|callbacks.py:310] 2024-07-16 16:29:25,669 >> {'loss': 0.0008, 'learning_rate': 1.4041e-06, 'epoch': 3.25, 'throughput': 479.00}
|
502 |
-
|
503 |
-
[INFO|callbacks.py:310] 2024-07-16 16:29:38,814 >> {'loss': 0.0058, 'learning_rate': 1.3650e-06, 'epoch': 3.28, 'throughput': 479.10}
|
504 |
-
|
505 |
-
[INFO|callbacks.py:310] 2024-07-16 16:29:51,974 >> {'loss': 0.0065, 'learning_rate': 1.3263e-06, 'epoch': 3.30, 'throughput': 479.09}
|
506 |
-
|
507 |
-
[INFO|callbacks.py:310] 2024-07-16 16:30:05,138 >> {'loss': 0.0398, 'learning_rate': 1.2880e-06, 'epoch': 3.33, 'throughput': 479.13}
|
508 |
-
|
509 |
-
[INFO|callbacks.py:310] 2024-07-16 16:30:18,301 >> {'loss': 0.0005, 'learning_rate': 1.2500e-06, 'epoch': 3.35, 'throughput': 479.20}
|
510 |
-
|
511 |
-
[INFO|callbacks.py:310] 2024-07-16 16:30:31,457 >> {'loss': 0.0049, 'learning_rate': 1.2124e-06, 'epoch': 3.38, 'throughput': 479.35}
|
512 |
-
|
513 |
-
[INFO|callbacks.py:310] 2024-07-16 16:30:44,626 >> {'loss': 0.0061, 'learning_rate': 1.1752e-06, 'epoch': 3.41, 'throughput': 479.38}
|
514 |
-
|
515 |
-
[INFO|callbacks.py:310] 2024-07-16 16:30:57,792 >> {'loss': 0.0111, 'learning_rate': 1.1384e-06, 'epoch': 3.43, 'throughput': 479.56}
|
516 |
-
|
517 |
-
[INFO|callbacks.py:310] 2024-07-16 16:31:10,962 >> {'loss': 0.0049, 'learning_rate': 1.1020e-06, 'epoch': 3.46, 'throughput': 479.60}
|
518 |
-
|
519 |
-
[INFO|callbacks.py:310] 2024-07-16 16:31:24,131 >> {'loss': 0.0012, 'learning_rate': 1.0661e-06, 'epoch': 3.48, 'throughput': 479.57}
|
520 |
-
|
521 |
-
[INFO|callbacks.py:310] 2024-07-16 16:31:37,297 >> {'loss': 0.0004, 'learning_rate': 1.0305e-06, 'epoch': 3.51, 'throughput': 479.59}
|
522 |
-
|
523 |
-
[INFO|callbacks.py:310] 2024-07-16 16:31:50,469 >> {'loss': 0.0006, 'learning_rate': 9.9546e-07, 'epoch': 3.54, 'throughput': 479.51}
|
524 |
-
|
525 |
-
[INFO|callbacks.py:310] 2024-07-16 16:32:03,645 >> {'loss': 0.0003, 'learning_rate': 9.6085e-07, 'epoch': 3.56, 'throughput': 479.49}
|
526 |
-
|
527 |
-
[INFO|callbacks.py:310] 2024-07-16 16:32:16,814 >> {'loss': 0.0004, 'learning_rate': 9.2670e-07, 'epoch': 3.59, 'throughput': 479.61}
|
528 |
-
|
529 |
-
[INFO|callbacks.py:310] 2024-07-16 16:32:29,980 >> {'loss': 0.0016, 'learning_rate': 8.9303e-07, 'epoch': 3.61, 'throughput': 479.62}
|
530 |
-
|
531 |
-
[INFO|callbacks.py:310] 2024-07-16 16:32:43,150 >> {'loss': 0.0268, 'learning_rate': 8.5985e-07, 'epoch': 3.64, 'throughput': 479.64}
|
532 |
-
|
533 |
-
[INFO|callbacks.py:310] 2024-07-16 16:32:56,321 >> {'loss': 0.0018, 'learning_rate': 8.2717e-07, 'epoch': 3.66, 'throughput': 479.52}
|
534 |
-
|
535 |
-
[INFO|callbacks.py:310] 2024-07-16 16:33:09,497 >> {'loss': 0.0100, 'learning_rate': 7.9500e-07, 'epoch': 3.69, 'throughput': 479.48}
|
536 |
-
|
537 |
-
[INFO|callbacks.py:310] 2024-07-16 16:33:22,661 >> {'loss': 0.0209, 'learning_rate': 7.6335e-07, 'epoch': 3.72, 'throughput': 479.66}
|
538 |
-
|
539 |
-
[INFO|callbacks.py:310] 2024-07-16 16:33:35,822 >> {'loss': 0.0076, 'learning_rate': 7.3223e-07, 'epoch': 3.74, 'throughput': 479.70}
|
540 |
-
|
541 |
-
[INFO|callbacks.py:310] 2024-07-16 16:33:48,985 >> {'loss': 0.0227, 'learning_rate': 7.0165e-07, 'epoch': 3.77, 'throughput': 479.79}
|
542 |
-
|
543 |
-
[INFO|callbacks.py:310] 2024-07-16 16:34:02,148 >> {'loss': 0.0002, 'learning_rate': 6.7162e-07, 'epoch': 3.79, 'throughput': 479.87}
|
544 |
-
|
545 |
-
[INFO|callbacks.py:310] 2024-07-16 16:34:15,317 >> {'loss': 0.0296, 'learning_rate': 6.4214e-07, 'epoch': 3.82, 'throughput': 479.86}
|
546 |
-
|
547 |
-
[INFO|callbacks.py:310] 2024-07-16 16:34:28,475 >> {'loss': 0.0006, 'learning_rate': 6.1323e-07, 'epoch': 3.85, 'throughput': 479.79}
|
548 |
-
|
549 |
-
[INFO|callbacks.py:310] 2024-07-16 16:34:41,637 >> {'loss': 0.0012, 'learning_rate': 5.8489e-07, 'epoch': 3.87, 'throughput': 479.85}
|
550 |
-
|
551 |
-
[INFO|callbacks.py:310] 2024-07-16 16:34:54,805 >> {'loss': 0.0007, 'learning_rate': 5.5714e-07, 'epoch': 3.90, 'throughput': 479.79}
|
552 |
-
|
553 |
-
[INFO|callbacks.py:310] 2024-07-16 16:35:07,976 >> {'loss': 0.0003, 'learning_rate': 5.2997e-07, 'epoch': 3.92, 'throughput': 479.87}
|
554 |
-
|
555 |
-
[INFO|callbacks.py:310] 2024-07-16 16:35:21,144 >> {'loss': 0.0005, 'learning_rate': 5.0341e-07, 'epoch': 3.95, 'throughput': 479.85}
|
556 |
-
|
557 |
-
[INFO|callbacks.py:310] 2024-07-16 16:35:34,309 >> {'loss': 0.0008, 'learning_rate': 4.7746e-07, 'epoch': 3.97, 'throughput': 479.92}
|
558 |
-
|
559 |
-
[INFO|callbacks.py:310] 2024-07-16 16:35:47,468 >> {'loss': 0.0003, 'learning_rate': 4.5212e-07, 'epoch': 4.00, 'throughput': 480.10}
|
560 |
-
|
561 |
-
[INFO|callbacks.py:310] 2024-07-16 16:36:00,628 >> {'loss': 0.0015, 'learning_rate': 4.2741e-07, 'epoch': 4.03, 'throughput': 480.13}
|
562 |
-
|
563 |
-
[INFO|callbacks.py:310] 2024-07-16 16:36:13,783 >> {'loss': 0.0007, 'learning_rate': 4.0332e-07, 'epoch': 4.05, 'throughput': 480.16}
|
564 |
-
|
565 |
-
[INFO|callbacks.py:310] 2024-07-16 16:36:26,955 >> {'loss': 0.0002, 'learning_rate': 3.7988e-07, 'epoch': 4.08, 'throughput': 480.08}
|
566 |
-
|
567 |
-
[INFO|callbacks.py:310] 2024-07-16 16:36:40,127 >> {'loss': 0.0052, 'learning_rate': 3.5708e-07, 'epoch': 4.10, 'throughput': 480.01}
|
568 |
-
|
569 |
-
[INFO|callbacks.py:310] 2024-07-16 16:36:53,283 >> {'loss': 0.0040, 'learning_rate': 3.3494e-07, 'epoch': 4.13, 'throughput': 479.97}
|
570 |
-
|
571 |
-
[INFO|callbacks.py:310] 2024-07-16 16:37:06,436 >> {'loss': 0.0004, 'learning_rate': 3.1345e-07, 'epoch': 4.15, 'throughput': 480.06}
|
572 |
-
|
573 |
-
[INFO|callbacks.py:310] 2024-07-16 16:37:19,608 >> {'loss': 0.0020, 'learning_rate': 2.9263e-07, 'epoch': 4.18, 'throughput': 480.12}
|
574 |
-
|
575 |
-
[INFO|callbacks.py:310] 2024-07-16 16:37:32,769 >> {'loss': 0.0001, 'learning_rate': 2.7248e-07, 'epoch': 4.21, 'throughput': 480.10}
|
576 |
-
|
577 |
-
[INFO|callbacks.py:310] 2024-07-16 16:37:45,932 >> {'loss': 0.0001, 'learning_rate': 2.5301e-07, 'epoch': 4.23, 'throughput': 480.03}
|
578 |
-
|
579 |
-
[INFO|callbacks.py:310] 2024-07-16 16:37:59,104 >> {'loss': 0.0002, 'learning_rate': 2.3423e-07, 'epoch': 4.26, 'throughput': 480.08}
|
580 |
-
|
581 |
-
[INFO|callbacks.py:310] 2024-07-16 16:38:12,270 >> {'loss': 0.0076, 'learning_rate': 2.1614e-07, 'epoch': 4.28, 'throughput': 480.00}
|
582 |
-
|
583 |
-
[INFO|callbacks.py:310] 2024-07-16 16:38:25,424 >> {'loss': 0.0001, 'learning_rate': 1.9874e-07, 'epoch': 4.31, 'throughput': 480.08}
|
584 |
-
|
585 |
-
[INFO|callbacks.py:310] 2024-07-16 16:38:38,584 >> {'loss': 0.0002, 'learning_rate': 1.8204e-07, 'epoch': 4.34, 'throughput': 480.02}
|
586 |
-
|
587 |
-
[INFO|callbacks.py:310] 2024-07-16 16:38:51,770 >> {'loss': 0.0001, 'learning_rate': 1.6605e-07, 'epoch': 4.36, 'throughput': 479.94}
|
588 |
-
|
589 |
-
[INFO|callbacks.py:310] 2024-07-16 16:39:04,932 >> {'loss': 0.0001, 'learning_rate': 1.5077e-07, 'epoch': 4.39, 'throughput': 480.03}
|
590 |
-
|
591 |
-
[INFO|callbacks.py:310] 2024-07-16 16:39:18,090 >> {'loss': 0.0002, 'learning_rate': 1.3620e-07, 'epoch': 4.41, 'throughput': 480.18}
|
592 |
-
|
593 |
-
[INFO|callbacks.py:310] 2024-07-16 16:39:31,264 >> {'loss': 0.0001, 'learning_rate': 1.2236e-07, 'epoch': 4.44, 'throughput': 480.20}
|
594 |
-
|
595 |
-
[INFO|callbacks.py:310] 2024-07-16 16:39:44,432 >> {'loss': 0.0005, 'learning_rate': 1.0924e-07, 'epoch': 4.46, 'throughput': 480.29}
|
596 |
-
|
597 |
-
[INFO|callbacks.py:310] 2024-07-16 16:39:57,582 >> {'loss': 0.0001, 'learning_rate': 9.6846e-08, 'epoch': 4.49, 'throughput': 480.29}
|
598 |
-
|
599 |
-
[INFO|callbacks.py:310] 2024-07-16 16:40:10,744 >> {'loss': 0.0001, 'learning_rate': 8.5185e-08, 'epoch': 4.52, 'throughput': 480.35}
|
600 |
-
|
601 |
-
[INFO|callbacks.py:310] 2024-07-16 16:40:23,896 >> {'loss': 0.0081, 'learning_rate': 7.4261e-08, 'epoch': 4.54, 'throughput': 480.49}
|
602 |
-
|
603 |
-
[INFO|callbacks.py:310] 2024-07-16 16:40:37,074 >> {'loss': 0.0002, 'learning_rate': 6.4075e-08, 'epoch': 4.57, 'throughput': 480.44}
|
604 |
-
|
605 |
-
[INFO|callbacks.py:310] 2024-07-16 16:40:50,235 >> {'loss': 0.0003, 'learning_rate': 5.4631e-08, 'epoch': 4.59, 'throughput': 480.50}
|
606 |
-
|
607 |
-
[INFO|callbacks.py:310] 2024-07-16 16:41:03,386 >> {'loss': 0.0001, 'learning_rate': 4.5932e-08, 'epoch': 4.62, 'throughput': 480.53}
|
608 |
-
|
609 |
-
[INFO|callbacks.py:310] 2024-07-16 16:41:16,551 >> {'loss': 0.0005, 'learning_rate': 3.7981e-08, 'epoch': 4.65, 'throughput': 480.53}
|
610 |
-
|
611 |
-
[INFO|callbacks.py:310] 2024-07-16 16:41:29,712 >> {'loss': 0.0001, 'learning_rate': 3.0779e-08, 'epoch': 4.67, 'throughput': 480.54}
|
612 |
-
|
613 |
-
[INFO|callbacks.py:310] 2024-07-16 16:41:42,867 >> {'loss': 0.0002, 'learning_rate': 2.4330e-08, 'epoch': 4.70, 'throughput': 480.53}
|
614 |
|
615 |
-
[INFO|
|
616 |
|
617 |
-
[INFO|
|
|
|
618 |
|
619 |
-
[INFO|
|
620 |
|
621 |
-
[INFO|
|
622 |
|
623 |
-
[
|
624 |
|
625 |
-
|
626 |
|
627 |
-
|
628 |
|
629 |
-
|
630 |
|
631 |
-
|
632 |
|
633 |
-
|
634 |
|
635 |
-
|
636 |
|
637 |
-
|
638 |
|
639 |
-
|
640 |
|
641 |
-
|
642 |
|
643 |
-
|
644 |
|
645 |
-
|
646 |
|
|
|
647 |
|
|
|
648 |
|
649 |
-
|
650 |
|
651 |
-
|
652 |
|
653 |
-
|
654 |
|
655 |
-
|
656 |
|
657 |
-
|
658 |
|
659 |
-
|
660 |
|
661 |
-
|
662 |
|
663 |
-
|
664 |
|
665 |
-
[INFO|
|
666 |
-
{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
|
667 |
|
|
|
1 |
+
07/16/2024 16:46:13 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
2 |
|
3 |
+
07/16/2024 16:46:13 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
|
4 |
|
5 |
+
07/16/2024 16:46:13 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
6 |
|
7 |
+
07/16/2024 16:46:13 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
|
8 |
|
9 |
+
[INFO|parser.py:325] 2024-07-16 16:46:13,724 >> Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: True, compute dtype: None
|
10 |
|
11 |
+
[INFO|tokenization_utils_base.py:2159] 2024-07-16 16:46:13,726 >> loading file tokenizer.json
|
12 |
|
13 |
+
[INFO|tokenization_utils_base.py:2159] 2024-07-16 16:46:13,726 >> loading file added_tokens.json
|
14 |
|
15 |
+
07/16/2024 16:46:13 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
16 |
|
17 |
+
07/16/2024 16:46:13 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
|
18 |
|
19 |
+
07/16/2024 16:46:13 - INFO - llamafactory.hparams.parser - Process rank: 3, device: cuda:3, n_gpu: 1, distributed training: True, compute dtype: None
|
20 |
|
21 |
+
07/16/2024 16:46:13 - INFO - llamafactory.hparams.parser - Process rank: 7, device: cuda:7, n_gpu: 1, distributed training: True, compute dtype: None
|
22 |
|
23 |
+
07/16/2024 16:46:13 - INFO - llamafactory.hparams.parser - Process rank: 6, device: cuda:6, n_gpu: 1, distributed training: True, compute dtype: None
|
24 |
|
25 |
+
[INFO|tokenization_utils_base.py:2159] 2024-07-16 16:46:13,726 >> loading file special_tokens_map.json
|
26 |
|
27 |
+
[INFO|tokenization_utils_base.py:2159] 2024-07-16 16:46:13,726 >> loading file tokenizer_config.json
|
28 |
|
29 |
+
[WARNING|logging.py:313] 2024-07-16 16:46:13,988 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
30 |
|
31 |
+
[INFO|template.py:270] 2024-07-16 16:46:13,989 >> Replace eos token: <|eot_id|>
|
32 |
|
33 |
+
[INFO|loader.py:50] 2024-07-16 16:46:13,989 >> Loading dataset 0716_truthfulqa_benchmark_test_2.json...
|
34 |
|
35 |
+
07/16/2024 16:46:13 - INFO - llamafactory.hparams.parser - Process rank: 5, device: cuda:5, n_gpu: 1, distributed training: True, compute dtype: None
|
36 |
|
37 |
+
07/16/2024 16:46:14 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
38 |
|
39 |
+
07/16/2024 16:46:14 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
|
40 |
|
41 |
+
07/16/2024 16:46:14 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
42 |
|
43 |
+
07/16/2024 16:46:14 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
|
44 |
|
45 |
+
07/16/2024 16:46:14 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
46 |
|
47 |
+
07/16/2024 16:46:14 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
|
48 |
|
49 |
+
07/16/2024 16:46:14 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
|
50 |
|
51 |
+
07/16/2024 16:46:14 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
|
52 |
|
53 |
+
07/16/2024 16:46:15 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
|
54 |
|
55 |
+
07/16/2024 16:46:15 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
|
56 |
|
57 |
+
07/16/2024 16:46:15 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
|
58 |
|
59 |
+
07/16/2024 16:46:15 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
|
60 |
|
61 |
+
07/16/2024 16:46:15 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
|
62 |
|
63 |
+
07/16/2024 16:46:15 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
|
64 |
|
65 |
+
07/16/2024 16:46:15 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test_2.json...
|
66 |
|
67 |
+
[INFO|configuration_utils.py:731] 2024-07-16 16:46:19,192 >> loading configuration file saves/LLaMA3-8B-Chat/full/train_2024-07-16-15-59-42_llama3_2/config.json
|
68 |
|
69 |
+
[INFO|configuration_utils.py:800] 2024-07-16 16:46:19,193 >> Model config LlamaConfig {
|
70 |
+
"_name_or_path": "saves/LLaMA3-8B-Chat/full/train_2024-07-16-15-59-42_llama3_2",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
71 |
"architectures": [
|
72 |
"LlamaForCausalLM"
|
73 |
],
|
|
|
92 |
"tie_word_embeddings": false,
|
93 |
"torch_dtype": "bfloat16",
|
94 |
"transformers_version": "4.42.3",
|
95 |
+
"use_cache": false,
|
96 |
"vocab_size": 128256
|
97 |
}
|
98 |
|
99 |
|
100 |
+
[INFO|patcher.py:81] 2024-07-16 16:46:19,193 >> Using KV cache for faster generation.
|
101 |
+
|
102 |
+
[INFO|modeling_utils.py:3553] 2024-07-16 16:46:19,217 >> loading weights file saves/LLaMA3-8B-Chat/full/train_2024-07-16-15-59-42_llama3_2/model.safetensors.index.json
|
103 |
|
104 |
+
[INFO|modeling_utils.py:1531] 2024-07-16 16:46:19,218 >> Instantiating LlamaForCausalLM model under default dtype torch.bfloat16.
|
105 |
|
106 |
+
[INFO|configuration_utils.py:1000] 2024-07-16 16:46:19,219 >> Generate config GenerationConfig {
|
107 |
"bos_token_id": 128000,
|
108 |
"eos_token_id": 128009
|
109 |
}
|
110 |
|
111 |
|
112 |
+
07/16/2024 16:46:19 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
|
113 |
+
|
114 |
+
07/16/2024 16:46:19 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
|
115 |
+
|
116 |
+
07/16/2024 16:46:19 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
|
117 |
+
|
118 |
+
07/16/2024 16:46:19 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
|
119 |
+
|
120 |
+
07/16/2024 16:46:19 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
|
121 |
|
122 |
+
07/16/2024 16:46:19 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
|
123 |
|
124 |
+
07/16/2024 16:46:19 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
|
125 |
+
|
126 |
+
[INFO|modeling_utils.py:4364] 2024-07-16 16:46:23,245 >> All model checkpoint weights were used when initializing LlamaForCausalLM.
|
127 |
+
|
128 |
+
|
129 |
+
[INFO|modeling_utils.py:4372] 2024-07-16 16:46:23,245 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at saves/LLaMA3-8B-Chat/full/train_2024-07-16-15-59-42_llama3_2.
|
130 |
If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
|
131 |
|
132 |
+
[INFO|configuration_utils.py:953] 2024-07-16 16:46:23,249 >> loading configuration file saves/LLaMA3-8B-Chat/full/train_2024-07-16-15-59-42_llama3_2/generation_config.json
|
133 |
|
134 |
+
[INFO|configuration_utils.py:1000] 2024-07-16 16:46:23,249 >> Generate config GenerationConfig {
|
135 |
"bos_token_id": 128000,
|
136 |
"do_sample": true,
|
137 |
"eos_token_id": [
|
|
|
144 |
}
|
145 |
|
146 |
|
147 |
+
[INFO|attention.py:80] 2024-07-16 16:46:23,255 >> Using torch SDPA for faster training and inference.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
148 |
|
149 |
+
[INFO|loader.py:196] 2024-07-16 16:46:23,260 >> all params: 8,030,261,248
|
150 |
|
151 |
+
[INFO|trainer.py:3788] 2024-07-16 16:46:23,374 >>
|
152 |
+
***** Running Prediction *****
|
153 |
|
154 |
+
[INFO|trainer.py:3790] 2024-07-16 16:46:23,375 >> Num examples = 1253
|
155 |
|
156 |
+
[INFO|trainer.py:3793] 2024-07-16 16:46:23,375 >> Batch size = 2
|
157 |
|
158 |
+
[WARNING|logging.py:328] 2024-07-16 16:46:24,023 >> We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
|
159 |
|
160 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
161 |
|
162 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.loader - all params: 8,030,261,248
|
163 |
|
164 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
165 |
|
166 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
167 |
|
168 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.loader - all params: 8,030,261,248
|
169 |
|
170 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.loader - all params: 8,030,261,248
|
171 |
|
172 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
173 |
|
174 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.loader - all params: 8,030,261,248
|
175 |
|
176 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
177 |
|
178 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.loader - all params: 8,030,261,248
|
179 |
|
180 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
181 |
|
182 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.loader - all params: 8,030,261,248
|
183 |
|
184 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
|
185 |
|
186 |
+
07/16/2024 16:46:24 - INFO - llamafactory.model.loader - all params: 8,030,261,248
|
187 |
|
188 |
+
+07/16/2024 16:46:25 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 16:46:25 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 16:46:25 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 16:46:25 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 16:46:25 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 16:46:25 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/16/2024 16:46:25 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
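The warning repeated above is emitted because generation is passing the legacy tuple-of-tensors form of `past_key_values`. A minimal sketch of the migration the message asks for, assuming transformers >= 4.36 where `DynamicCache` is available (the `to_cache` helper is hypothetical, not part of LLaMA-Factory):

```python
from transformers import DynamicCache

def to_cache(past_key_values):
    # Convert a legacy per-layer tuple of (key, value) tensors into the
    # Cache object that newer transformers versions expect.
    if isinstance(past_key_values, tuple):
        return DynamicCache.from_legacy_cache(past_key_values)
    return past_key_values
```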
+[INFO|trainer.py:127] 2024-07-16 16:46:32,944 >> Saving prediction results to saves/LLaMA3-8B-Chat/full/eval_2024-07-16-16-45-32/generated_predictions.jsonl
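Each line of the saved generated_predictions.jsonl is one JSON record per evaluation sample. A hedged sketch for reading and scoring it, assuming the usual LLaMA-Factory field names `label` and `predict` (field names may differ across versions):

```python
import json

path = "saves/LLaMA3-8B-Chat/full/eval_2024-07-16-16-45-32/generated_predictions.jsonl"
with open(path, encoding="utf-8") as f:
    records = [json.loads(line) for line in f]

# Exact-match rate; the "label"/"predict" keys are an assumption about the format.
hits = sum(r["predict"].strip() == r["label"].strip() for r in records)
print(f"exact match: {hits}/{len(records)} = {hits / len(records):.2%}")
```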
trainer_log.jsonl
CHANGED
@@ -1,191 +1,15 @@
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps": 16, "total_steps": 190, "loss": 0.4424, "learning_rate": 4.986304738420684e-06, "epoch": 0.4129032258064516, "percentage": 8.42, "elapsed_time": "0:03:42", "remaining_time": "0:40:17", "throughput": "458.45", "total_tokens": 101904}
-{"current_steps": 17, "total_steps": 190, "loss": 0.3336, "learning_rate": 4.981365379103306e-06, "epoch": 0.43870967741935485, "percentage": 8.95, "elapsed_time": "0:03:55", "remaining_time": "0:39:56", "throughput": "459.83", "total_tokens": 108272}
-{"current_steps": 18, "total_steps": 190, "loss": 0.2568, "learning_rate": 4.975670171853926e-06, "epoch": 0.4645161290322581, "percentage": 9.47, "elapsed_time": "0:04:08", "remaining_time": "0:39:35", "throughput": "460.35", "total_tokens": 114464}
-{"current_steps": 19, "total_steps": 190, "loss": 0.1889, "learning_rate": 4.9692208514878445e-06, "epoch": 0.49032258064516127, "percentage": 10.0, "elapsed_time": "0:04:21", "remaining_time": "0:39:16", "throughput": "461.44", "total_tokens": 120816}
-{"current_steps": 20, "total_steps": 190, "loss": 0.1974, "learning_rate": 4.962019382530521e-06, "epoch": 0.5161290322580645, "percentage": 10.53, "elapsed_time": "0:04:34", "remaining_time": "0:38:57", "throughput": "462.27", "total_tokens": 127120}
-{"current_steps": 21, "total_steps": 190, "loss": 0.1766, "learning_rate": 4.9540679586191605e-06, "epoch": 0.5419354838709678, "percentage": 11.05, "elapsed_time": "0:04:48", "remaining_time": "0:38:39", "throughput": "463.99", "total_tokens": 133712}
-{"current_steps": 22, "total_steps": 190, "loss": 0.1694, "learning_rate": 4.9453690018345144e-06, "epoch": 0.567741935483871, "percentage": 11.58, "elapsed_time": "0:05:01", "remaining_time": "0:38:21", "throughput": "464.28", "total_tokens": 139904}
-{"current_steps": 23, "total_steps": 190, "loss": 0.1374, "learning_rate": 4.935925161963089e-06, "epoch": 0.5935483870967742, "percentage": 12.11, "elapsed_time": "0:05:14", "remaining_time": "0:38:03", "throughput": "465.03", "total_tokens": 146256}
-{"current_steps": 24, "total_steps": 190, "loss": 0.1496, "learning_rate": 4.925739315689991e-06, "epoch": 0.6193548387096774, "percentage": 12.63, "elapsed_time": "0:05:27", "remaining_time": "0:37:46", "throughput": "466.25", "total_tokens": 152784}
-{"current_steps": 25, "total_steps": 190, "loss": 0.1554, "learning_rate": 4.914814565722671e-06, "epoch": 0.6451612903225806, "percentage": 13.16, "elapsed_time": "0:05:40", "remaining_time": "0:37:29", "throughput": "466.38", "total_tokens": 158976}
-{"current_steps": 26, "total_steps": 190, "loss": 0.0918, "learning_rate": 4.903154239845798e-06, "epoch": 0.6709677419354839, "percentage": 13.68, "elapsed_time": "0:05:54", "remaining_time": "0:37:13", "throughput": "466.86", "total_tokens": 165280}
-{"current_steps": 27, "total_steps": 190, "loss": 0.1062, "learning_rate": 4.890761889907589e-06, "epoch": 0.6967741935483871, "percentage": 14.21, "elapsed_time": "0:06:07", "remaining_time": "0:36:56", "throughput": "467.90", "total_tokens": 171808}
-{"current_steps": 28, "total_steps": 190, "loss": 0.1975, "learning_rate": 4.8776412907378845e-06, "epoch": 0.7225806451612903, "percentage": 14.74, "elapsed_time": "0:06:20", "remaining_time": "0:36:40", "throughput": "468.27", "total_tokens": 178112}
-{"current_steps": 29, "total_steps": 190, "loss": 0.1389, "learning_rate": 4.863796438998293e-06, "epoch": 0.7483870967741936, "percentage": 15.26, "elapsed_time": "0:06:33", "remaining_time": "0:36:24", "throughput": "468.71", "total_tokens": 184448}
-{"current_steps": 30, "total_steps": 190, "loss": 0.1382, "learning_rate": 4.849231551964771e-06, "epoch": 0.7741935483870968, "percentage": 15.79, "elapsed_time": "0:06:46", "remaining_time": "0:36:09", "throughput": "469.38", "total_tokens": 190896}
-{"current_steps": 31, "total_steps": 190, "loss": 0.1982, "learning_rate": 4.833951066243004e-06, "epoch": 0.8, "percentage": 16.32, "elapsed_time": "0:06:59", "remaining_time": "0:35:53", "throughput": "469.24", "total_tokens": 197024}
-{"current_steps": 32, "total_steps": 190, "loss": 0.1072, "learning_rate": 4.817959636416969e-06, "epoch": 0.8258064516129032, "percentage": 16.84, "elapsed_time": "0:07:13", "remaining_time": "0:35:38", "throughput": "469.35", "total_tokens": 203248}
-{"current_steps": 33, "total_steps": 190, "loss": 0.0757, "learning_rate": 4.801262133631101e-06, "epoch": 0.8516129032258064, "percentage": 17.37, "elapsed_time": "0:07:26", "remaining_time": "0:35:22", "throughput": "470.10", "total_tokens": 209760}
-{"current_steps": 34, "total_steps": 190, "loss": 0.0829, "learning_rate": 4.783863644106502e-06, "epoch": 0.8774193548387097, "percentage": 17.89, "elapsed_time": "0:07:39", "remaining_time": "0:35:07", "throughput": "470.13", "total_tokens": 215968}
-{"current_steps": 35, "total_steps": 190, "loss": 0.1017, "learning_rate": 4.765769467591626e-06, "epoch": 0.9032258064516129, "percentage": 18.42, "elapsed_time": "0:07:52", "remaining_time": "0:34:52", "throughput": "470.20", "total_tokens": 222192}
-{"current_steps": 36, "total_steps": 190, "loss": 0.0957, "learning_rate": 4.746985115747918e-06, "epoch": 0.9290322580645162, "percentage": 18.95, "elapsed_time": "0:08:05", "remaining_time": "0:34:37", "throughput": "470.47", "total_tokens": 228512}
-{"current_steps": 37, "total_steps": 190, "loss": 0.0999, "learning_rate": 4.72751631047092e-06, "epoch": 0.9548387096774194, "percentage": 19.47, "elapsed_time": "0:08:18", "remaining_time": "0:34:22", "throughput": "471.30", "total_tokens": 235120}
-{"current_steps": 38, "total_steps": 190, "loss": 0.0581, "learning_rate": 4.707368982147318e-06, "epoch": 0.9806451612903225, "percentage": 20.0, "elapsed_time": "0:08:32", "remaining_time": "0:34:08", "throughput": "471.80", "total_tokens": 241584}
-{"current_steps": 39, "total_steps": 190, "loss": 0.0923, "learning_rate": 4.68654926784849e-06, "epoch": 1.0064516129032257, "percentage": 20.53, "elapsed_time": "0:08:45", "remaining_time": "0:33:53", "throughput": "472.34", "total_tokens": 248080}
-{"current_steps": 40, "total_steps": 190, "loss": 0.0506, "learning_rate": 4.665063509461098e-06, "epoch": 1.032258064516129, "percentage": 21.05, "elapsed_time": "0:08:58", "remaining_time": "0:33:38", "throughput": "472.58", "total_tokens": 254416}
-{"current_steps": 41, "total_steps": 190, "loss": 0.0333, "learning_rate": 4.642918251755281e-06, "epoch": 1.0580645161290323, "percentage": 21.58, "elapsed_time": "0:09:11", "remaining_time": "0:33:24", "throughput": "472.58", "total_tokens": 260640}
-{"current_steps": 42, "total_steps": 190, "loss": 0.038, "learning_rate": 4.620120240391065e-06, "epoch": 1.0838709677419356, "percentage": 22.11, "elapsed_time": "0:09:24", "remaining_time": "0:33:09", "throughput": "472.96", "total_tokens": 267072}
-{"current_steps": 43, "total_steps": 190, "loss": 0.0416, "learning_rate": 4.596676419863561e-06, "epoch": 1.1096774193548387, "percentage": 22.63, "elapsed_time": "0:09:37", "remaining_time": "0:32:55", "throughput": "473.13", "total_tokens": 273392}
-{"current_steps": 44, "total_steps": 190, "loss": 0.1068, "learning_rate": 4.572593931387604e-06, "epoch": 1.135483870967742, "percentage": 23.16, "elapsed_time": "0:09:51", "remaining_time": "0:32:41", "throughput": "473.22", "total_tokens": 279680}
-{"current_steps": 45, "total_steps": 190, "loss": 0.0369, "learning_rate": 4.54788011072248e-06, "epoch": 1.1612903225806452, "percentage": 23.68, "elapsed_time": "0:10:04", "remaining_time": "0:32:26", "throughput": "473.32", "total_tokens": 285968}
-{"current_steps": 46, "total_steps": 190, "loss": 0.1703, "learning_rate": 4.522542485937369e-06, "epoch": 1.1870967741935483, "percentage": 24.21, "elapsed_time": "0:10:17", "remaining_time": "0:32:12", "throughput": "473.49", "total_tokens": 292304}
-{"current_steps": 47, "total_steps": 190, "loss": 0.1102, "learning_rate": 4.496588775118232e-06, "epoch": 1.2129032258064516, "percentage": 24.74, "elapsed_time": "0:10:30", "remaining_time": "0:31:58", "throughput": "473.60", "total_tokens": 298608}
-{"current_steps": 48, "total_steps": 190, "loss": 0.0595, "learning_rate": 4.470026884016805e-06, "epoch": 1.238709677419355, "percentage": 25.26, "elapsed_time": "0:10:43", "remaining_time": "0:31:44", "throughput": "473.71", "total_tokens": 304912}
-{"current_steps": 49, "total_steps": 190, "loss": 0.1009, "learning_rate": 4.442864903642428e-06, "epoch": 1.2645161290322582, "percentage": 25.79, "elapsed_time": "0:10:56", "remaining_time": "0:31:30", "throughput": "473.98", "total_tokens": 311328}
-{"current_steps": 50, "total_steps": 190, "loss": 0.0434, "learning_rate": 4.415111107797445e-06, "epoch": 1.2903225806451613, "percentage": 26.32, "elapsed_time": "0:11:10", "remaining_time": "0:31:16", "throughput": "474.12", "total_tokens": 317664}
-{"current_steps": 51, "total_steps": 190, "loss": 0.0281, "learning_rate": 4.386773950556931e-06, "epoch": 1.3161290322580645, "percentage": 26.84, "elapsed_time": "0:11:23", "remaining_time": "0:31:01", "throughput": "474.46", "total_tokens": 324128}
-{"current_steps": 52, "total_steps": 190, "loss": 0.0513, "learning_rate": 4.357862063693486e-06, "epoch": 1.3419354838709676, "percentage": 27.37, "elapsed_time": "0:11:36", "remaining_time": "0:30:47", "throughput": "474.35", "total_tokens": 330304}
-{"current_steps": 53, "total_steps": 190, "loss": 0.0902, "learning_rate": 4.328384254047927e-06, "epoch": 1.367741935483871, "percentage": 27.89, "elapsed_time": "0:11:49", "remaining_time": "0:30:34", "throughput": "474.43", "total_tokens": 336608}
-{"current_steps": 54, "total_steps": 190, "loss": 0.0448, "learning_rate": 4.2983495008466285e-06, "epoch": 1.3935483870967742, "percentage": 28.42, "elapsed_time": "0:12:02", "remaining_time": "0:30:20", "throughput": "474.55", "total_tokens": 342944}
-{"current_steps": 55, "total_steps": 190, "loss": 0.036, "learning_rate": 4.267766952966369e-06, "epoch": 1.4193548387096775, "percentage": 28.95, "elapsed_time": "0:12:15", "remaining_time": "0:30:06", "throughput": "474.98", "total_tokens": 349504}
-{"current_steps": 56, "total_steps": 190, "loss": 0.0279, "learning_rate": 4.236645926147493e-06, "epoch": 1.4451612903225808, "percentage": 29.47, "elapsed_time": "0:12:29", "remaining_time": "0:29:52", "throughput": "475.04", "total_tokens": 355808}
-{"current_steps": 57, "total_steps": 190, "loss": 0.0527, "learning_rate": 4.204995900156247e-06, "epoch": 1.4709677419354839, "percentage": 30.0, "elapsed_time": "0:12:42", "remaining_time": "0:29:38", "throughput": "475.14", "total_tokens": 362144}
-{"current_steps": 58, "total_steps": 190, "loss": 0.0466, "learning_rate": 4.172826515897146e-06, "epoch": 1.4967741935483871, "percentage": 30.53, "elapsed_time": "0:12:55", "remaining_time": "0:29:24", "throughput": "475.66", "total_tokens": 368800}
-{"current_steps": 59, "total_steps": 190, "loss": 0.0203, "learning_rate": 4.140147572476269e-06, "epoch": 1.5225806451612902, "percentage": 31.05, "elapsed_time": "0:13:08", "remaining_time": "0:29:10", "throughput": "475.90", "total_tokens": 375264}
-{"current_steps": 60, "total_steps": 190, "loss": 0.0693, "learning_rate": 4.106969024216348e-06, "epoch": 1.5483870967741935, "percentage": 31.58, "elapsed_time": "0:13:21", "remaining_time": "0:28:57", "throughput": "475.74", "total_tokens": 381408}
-{"current_steps": 61, "total_steps": 190, "loss": 0.0193, "learning_rate": 4.073300977624594e-06, "epoch": 1.5741935483870968, "percentage": 32.11, "elapsed_time": "0:13:34", "remaining_time": "0:28:43", "throughput": "475.58", "total_tokens": 387552}
-{"current_steps": 62, "total_steps": 190, "loss": 0.1155, "learning_rate": 4.039153688314146e-06, "epoch": 1.6, "percentage": 32.63, "elapsed_time": "0:13:48", "remaining_time": "0:28:29", "throughput": "475.95", "total_tokens": 394128}
-{"current_steps": 63, "total_steps": 190, "loss": 0.0594, "learning_rate": 4.0045375578801216e-06, "epoch": 1.6258064516129034, "percentage": 33.16, "elapsed_time": "0:14:01", "remaining_time": "0:28:15", "throughput": "476.06", "total_tokens": 400512}
-{"current_steps": 64, "total_steps": 190, "loss": 0.0391, "learning_rate": 3.969463130731183e-06, "epoch": 1.6516129032258065, "percentage": 33.68, "elapsed_time": "0:14:14", "remaining_time": "0:28:02", "throughput": "476.02", "total_tokens": 406752}
-{"current_steps": 65, "total_steps": 190, "loss": 0.0552, "learning_rate": 3.933941090877615e-06, "epoch": 1.6774193548387095, "percentage": 34.21, "elapsed_time": "0:14:27", "remaining_time": "0:27:48", "throughput": "476.02", "total_tokens": 413040}
-{"current_steps": 66, "total_steps": 190, "loss": 0.03, "learning_rate": 3.897982258676867e-06, "epoch": 1.7032258064516128, "percentage": 34.74, "elapsed_time": "0:14:40", "remaining_time": "0:27:35", "throughput": "476.12", "total_tokens": 419408}
-{"current_steps": 67, "total_steps": 190, "loss": 0.0458, "learning_rate": 3.861597587537568e-06, "epoch": 1.729032258064516, "percentage": 35.26, "elapsed_time": "0:14:54", "remaining_time": "0:27:21", "throughput": "476.36", "total_tokens": 425920}
-{"current_steps": 68, "total_steps": 190, "loss": 0.0502, "learning_rate": 3.824798160583012e-06, "epoch": 1.7548387096774194, "percentage": 35.79, "elapsed_time": "0:15:07", "remaining_time": "0:27:07", "throughput": "476.58", "total_tokens": 432400}
-{"current_steps": 69, "total_steps": 190, "loss": 0.0513, "learning_rate": 3.787595187275136e-06, "epoch": 1.7806451612903227, "percentage": 36.32, "elapsed_time": "0:15:20", "remaining_time": "0:26:54", "throughput": "476.59", "total_tokens": 438688}
-{"current_steps": 70, "total_steps": 190, "loss": 0.0309, "learning_rate": 3.7500000000000005e-06, "epoch": 1.8064516129032258, "percentage": 36.84, "elapsed_time": "0:15:33", "remaining_time": "0:26:40", "throughput": "476.93", "total_tokens": 445280}
-{"current_steps": 71, "total_steps": 190, "loss": 0.0889, "learning_rate": 3.7120240506158433e-06, "epoch": 1.832258064516129, "percentage": 37.37, "elapsed_time": "0:15:46", "remaining_time": "0:26:26", "throughput": "476.99", "total_tokens": 451616}
-{"current_steps": 72, "total_steps": 190, "loss": 0.0868, "learning_rate": 3.6736789069647273e-06, "epoch": 1.8580645161290321, "percentage": 37.89, "elapsed_time": "0:15:59", "remaining_time": "0:26:13", "throughput": "476.95", "total_tokens": 457856}
-{"current_steps": 73, "total_steps": 190, "loss": 0.0516, "learning_rate": 3.634976249348867e-06, "epoch": 1.8838709677419354, "percentage": 38.42, "elapsed_time": "0:16:13", "remaining_time": "0:25:59", "throughput": "476.96", "total_tokens": 464144}
-{"current_steps": 74, "total_steps": 190, "loss": 0.059, "learning_rate": 3.595927866972694e-06, "epoch": 1.9096774193548387, "percentage": 38.95, "elapsed_time": "0:16:26", "remaining_time": "0:25:46", "throughput": "477.28", "total_tokens": 470736}
-{"current_steps": 75, "total_steps": 190, "loss": 0.0475, "learning_rate": 3.556545654351749e-06, "epoch": 1.935483870967742, "percentage": 39.47, "elapsed_time": "0:16:39", "remaining_time": "0:25:32", "throughput": "477.42", "total_tokens": 477168}
-{"current_steps": 76, "total_steps": 190, "loss": 0.0704, "learning_rate": 3.516841607689501e-06, "epoch": 1.9612903225806453, "percentage": 40.0, "elapsed_time": "0:16:52", "remaining_time": "0:25:18", "throughput": "477.51", "total_tokens": 483536}
-{"current_steps": 77, "total_steps": 190, "loss": 0.0666, "learning_rate": 3.476827821223184e-06, "epoch": 1.9870967741935484, "percentage": 40.53, "elapsed_time": "0:17:05", "remaining_time": "0:25:05", "throughput": "477.44", "total_tokens": 489760}
-{"current_steps": 78, "total_steps": 190, "loss": 0.0275, "learning_rate": 3.436516483539781e-06, "epoch": 2.0129032258064514, "percentage": 41.05, "elapsed_time": "0:17:18", "remaining_time": "0:24:51", "throughput": "477.39", "total_tokens": 496000}
-{"current_steps": 79, "total_steps": 190, "loss": 0.0169, "learning_rate": 3.39591987386325e-06, "epoch": 2.0387096774193547, "percentage": 41.58, "elapsed_time": "0:17:32", "remaining_time": "0:24:38", "throughput": "477.49", "total_tokens": 502384}
-{"current_steps": 80, "total_steps": 190, "loss": 0.0056, "learning_rate": 3.3550503583141726e-06, "epoch": 2.064516129032258, "percentage": 42.11, "elapsed_time": "0:17:45", "remaining_time": "0:24:24", "throughput": "477.79", "total_tokens": 508992}
-{"current_steps": 81, "total_steps": 190, "loss": 0.0139, "learning_rate": 3.313920386142892e-06, "epoch": 2.0903225806451613, "percentage": 42.63, "elapsed_time": "0:17:58", "remaining_time": "0:24:11", "throughput": "477.73", "total_tokens": 515216}
-{"current_steps": 82, "total_steps": 190, "loss": 0.0561, "learning_rate": 3.272542485937369e-06, "epoch": 2.1161290322580646, "percentage": 43.16, "elapsed_time": "0:18:11", "remaining_time": "0:23:57", "throughput": "477.99", "total_tokens": 521792}
-{"current_steps": 83, "total_steps": 190, "loss": 0.0098, "learning_rate": 3.230929261806842e-06, "epoch": 2.141935483870968, "percentage": 43.68, "elapsed_time": "0:18:24", "remaining_time": "0:23:44", "throughput": "478.07", "total_tokens": 528176}
-{"current_steps": 84, "total_steps": 190, "loss": 0.0037, "learning_rate": 3.189093389542498e-06, "epoch": 2.167741935483871, "percentage": 44.21, "elapsed_time": "0:18:37", "remaining_time": "0:23:30", "throughput": "478.10", "total_tokens": 534496}
-{"current_steps": 85, "total_steps": 190, "loss": 0.0194, "learning_rate": 3.147047612756302e-06, "epoch": 2.193548387096774, "percentage": 44.74, "elapsed_time": "0:18:51", "remaining_time": "0:23:17", "throughput": "478.31", "total_tokens": 541024}
-{"current_steps": 86, "total_steps": 190, "loss": 0.0004, "learning_rate": 3.1048047389991693e-06, "epoch": 2.2193548387096773, "percentage": 45.26, "elapsed_time": "0:19:04", "remaining_time": "0:23:03", "throughput": "478.31", "total_tokens": 547328}
-{"current_steps": 87, "total_steps": 190, "loss": 0.0003, "learning_rate": 3.062377635859663e-06, "epoch": 2.2451612903225806, "percentage": 45.79, "elapsed_time": "0:19:17", "remaining_time": "0:22:50", "throughput": "478.43", "total_tokens": 553760}
-{"current_steps": 88, "total_steps": 190, "loss": 0.0511, "learning_rate": 3.019779227044398e-06, "epoch": 2.270967741935484, "percentage": 46.32, "elapsed_time": "0:19:30", "remaining_time": "0:22:36", "throughput": "478.42", "total_tokens": 560048}
-{"current_steps": 89, "total_steps": 190, "loss": 0.0974, "learning_rate": 2.9770224884413625e-06, "epoch": 2.296774193548387, "percentage": 46.84, "elapsed_time": "0:19:43", "remaining_time": "0:22:23", "throughput": "478.66", "total_tokens": 566624}
-{"current_steps": 90, "total_steps": 190, "loss": 0.0442, "learning_rate": 2.9341204441673267e-06, "epoch": 2.3225806451612905, "percentage": 47.37, "elapsed_time": "0:19:56", "remaining_time": "0:22:09", "throughput": "478.60", "total_tokens": 572864}
-{"current_steps": 91, "total_steps": 190, "loss": 0.0802, "learning_rate": 2.8910861626005774e-06, "epoch": 2.3483870967741938, "percentage": 47.89, "elapsed_time": "0:20:10", "remaining_time": "0:21:56", "throughput": "478.49", "total_tokens": 579024}
-{"current_steps": 92, "total_steps": 190, "loss": 0.0195, "learning_rate": 2.847932752400164e-06, "epoch": 2.3741935483870966, "percentage": 48.42, "elapsed_time": "0:20:23", "remaining_time": "0:21:43", "throughput": "478.66", "total_tokens": 585536}
-{"current_steps": 93, "total_steps": 190, "loss": 0.055, "learning_rate": 2.804673358512869e-06, "epoch": 2.4, "percentage": 48.95, "elapsed_time": "0:20:36", "remaining_time": "0:21:29", "throughput": "478.62", "total_tokens": 591792}
-{"current_steps": 94, "total_steps": 190, "loss": 0.0268, "learning_rate": 2.761321158169134e-06, "epoch": 2.425806451612903, "percentage": 49.47, "elapsed_time": "0:20:49", "remaining_time": "0:21:16", "throughput": "478.66", "total_tokens": 598144}
-{"current_steps": 95, "total_steps": 190, "loss": 0.0196, "learning_rate": 2.717889356869146e-06, "epoch": 2.4516129032258065, "percentage": 50.0, "elapsed_time": "0:21:02", "remaining_time": "0:21:02", "throughput": "478.71", "total_tokens": 604496}
-{"current_steps": 96, "total_steps": 190, "loss": 0.0363, "learning_rate": 2.6743911843603134e-06, "epoch": 2.47741935483871, "percentage": 50.53, "elapsed_time": "0:21:15", "remaining_time": "0:20:49", "throughput": "478.69", "total_tokens": 610784}
-{"current_steps": 97, "total_steps": 190, "loss": 0.0046, "learning_rate": 2.6308398906073603e-06, "epoch": 2.5032258064516126, "percentage": 51.05, "elapsed_time": "0:21:29", "remaining_time": "0:20:35", "throughput": "478.64", "total_tokens": 617024}
-{"current_steps": 98, "total_steps": 190, "loss": 0.0366, "learning_rate": 2.587248741756253e-06, "epoch": 2.5290322580645164, "percentage": 51.58, "elapsed_time": "0:21:42", "remaining_time": "0:20:22", "throughput": "478.64", "total_tokens": 623312}
-{"current_steps": 99, "total_steps": 190, "loss": 0.0051, "learning_rate": 2.543631016093209e-06, "epoch": 2.554838709677419, "percentage": 52.11, "elapsed_time": "0:21:55", "remaining_time": "0:20:09", "throughput": "478.64", "total_tokens": 629616}
-{"current_steps": 100, "total_steps": 190, "loss": 0.0226, "learning_rate": 2.5e-06, "epoch": 2.5806451612903225, "percentage": 52.63, "elapsed_time": "0:22:08", "remaining_time": "0:19:55", "throughput": "478.82", "total_tokens": 636144}
-{"current_steps": 101, "total_steps": 190, "loss": 0.0818, "learning_rate": 2.4563689839067913e-06, "epoch": 2.606451612903226, "percentage": 53.16, "elapsed_time": "0:22:21", "remaining_time": "0:19:42", "throughput": "478.85", "total_tokens": 642496}
-{"current_steps": 102, "total_steps": 190, "loss": 0.0247, "learning_rate": 2.4127512582437486e-06, "epoch": 2.632258064516129, "percentage": 53.68, "elapsed_time": "0:22:34", "remaining_time": "0:19:28", "throughput": "478.93", "total_tokens": 648912}
-{"current_steps": 103, "total_steps": 190, "loss": 0.0593, "learning_rate": 2.3691601093926406e-06, "epoch": 2.6580645161290324, "percentage": 54.21, "elapsed_time": "0:22:48", "remaining_time": "0:19:15", "throughput": "478.83", "total_tokens": 655088}
-{"current_steps": 104, "total_steps": 190, "loss": 0.0073, "learning_rate": 2.325608815639687e-06, "epoch": 2.6838709677419352, "percentage": 54.74, "elapsed_time": "0:23:01", "remaining_time": "0:19:02", "throughput": "479.05", "total_tokens": 661680}
-{"current_steps": 105, "total_steps": 190, "loss": 0.0295, "learning_rate": 2.2821106431308546e-06, "epoch": 2.709677419354839, "percentage": 55.26, "elapsed_time": "0:23:14", "remaining_time": "0:18:48", "throughput": "479.07", "total_tokens": 668016}
-{"current_steps": 106, "total_steps": 190, "loss": 0.0115, "learning_rate": 2.238678841830867e-06, "epoch": 2.735483870967742, "percentage": 55.79, "elapsed_time": "0:23:27", "remaining_time": "0:18:35", "throughput": "478.96", "total_tokens": 674176}
-{"current_steps": 107, "total_steps": 190, "loss": 0.0064, "learning_rate": 2.195326641487132e-06, "epoch": 2.761290322580645, "percentage": 56.32, "elapsed_time": "0:23:40", "remaining_time": "0:18:22", "throughput": "478.95", "total_tokens": 680464}
-{"current_steps": 108, "total_steps": 190, "loss": 0.0229, "learning_rate": 2.1520672475998374e-06, "epoch": 2.7870967741935484, "percentage": 56.84, "elapsed_time": "0:23:53", "remaining_time": "0:18:08", "throughput": "478.89", "total_tokens": 686688}
-{"current_steps": 109, "total_steps": 190, "loss": 0.0605, "learning_rate": 2.1089138373994226e-06, "epoch": 2.8129032258064517, "percentage": 57.37, "elapsed_time": "0:24:07", "remaining_time": "0:17:55", "throughput": "478.89", "total_tokens": 692992}
-{"current_steps": 110, "total_steps": 190, "loss": 0.05, "learning_rate": 2.0658795558326745e-06, "epoch": 2.838709677419355, "percentage": 57.89, "elapsed_time": "0:24:20", "remaining_time": "0:17:42", "throughput": "478.95", "total_tokens": 699392}
-{"current_steps": 111, "total_steps": 190, "loss": 0.0544, "learning_rate": 2.022977511558638e-06, "epoch": 2.864516129032258, "percentage": 58.42, "elapsed_time": "0:24:33", "remaining_time": "0:17:28", "throughput": "478.93", "total_tokens": 705680}
-{"current_steps": 112, "total_steps": 190, "loss": 0.0109, "learning_rate": 1.9802207729556023e-06, "epoch": 2.8903225806451616, "percentage": 58.95, "elapsed_time": "0:24:46", "remaining_time": "0:17:15", "throughput": "478.90", "total_tokens": 711952}
-{"current_steps": 113, "total_steps": 190, "loss": 0.0242, "learning_rate": 1.937622364140338e-06, "epoch": 2.9161290322580644, "percentage": 59.47, "elapsed_time": "0:24:59", "remaining_time": "0:17:01", "throughput": "478.86", "total_tokens": 718192}
-{"current_steps": 114, "total_steps": 190, "loss": 0.0223, "learning_rate": 1.895195261000831e-06, "epoch": 2.9419354838709677, "percentage": 60.0, "elapsed_time": "0:25:12", "remaining_time": "0:16:48", "throughput": "479.09", "total_tokens": 724832}
-{"current_steps": 115, "total_steps": 190, "loss": 0.0263, "learning_rate": 1.852952387243698e-06, "epoch": 2.967741935483871, "percentage": 60.53, "elapsed_time": "0:25:26", "remaining_time": "0:16:35", "throughput": "479.20", "total_tokens": 731312}
-{"current_steps": 116, "total_steps": 190, "loss": 0.0014, "learning_rate": 1.8109066104575023e-06, "epoch": 2.9935483870967743, "percentage": 61.05, "elapsed_time": "0:25:39", "remaining_time": "0:16:21", "throughput": "479.12", "total_tokens": 737488}
-{"current_steps": 117, "total_steps": 190, "loss": 0.0061, "learning_rate": 1.7690707381931585e-06, "epoch": 3.0193548387096776, "percentage": 61.58, "elapsed_time": "0:25:52", "remaining_time": "0:16:08", "throughput": "479.09", "total_tokens": 743760}
-{"current_steps": 118, "total_steps": 190, "loss": 0.0296, "learning_rate": 1.7274575140626318e-06, "epoch": 3.0451612903225804, "percentage": 62.11, "elapsed_time": "0:26:05", "remaining_time": "0:15:55", "throughput": "479.08", "total_tokens": 750048}
-{"current_steps": 119, "total_steps": 190, "loss": 0.0186, "learning_rate": 1.686079613857109e-06, "epoch": 3.0709677419354837, "percentage": 62.63, "elapsed_time": "0:26:18", "remaining_time": "0:15:41", "throughput": "479.11", "total_tokens": 756400}
-{"current_steps": 120, "total_steps": 190, "loss": 0.0038, "learning_rate": 1.6449496416858285e-06, "epoch": 3.096774193548387, "percentage": 63.16, "elapsed_time": "0:26:31", "remaining_time": "0:15:28", "throughput": "478.93", "total_tokens": 762432}
-{"current_steps": 121, "total_steps": 190, "loss": 0.0033, "learning_rate": 1.6040801261367494e-06, "epoch": 3.1225806451612903, "percentage": 63.68, "elapsed_time": "0:26:45", "remaining_time": "0:15:15", "throughput": "478.90", "total_tokens": 768688}
-{"current_steps": 122, "total_steps": 190, "loss": 0.0091, "learning_rate": 1.56348351646022e-06, "epoch": 3.1483870967741936, "percentage": 64.21, "elapsed_time": "0:26:58", "remaining_time": "0:15:01", "throughput": "478.92", "total_tokens": 775024}
-{"current_steps": 123, "total_steps": 190, "loss": 0.0012, "learning_rate": 1.5231721787768162e-06, "epoch": 3.174193548387097, "percentage": 64.74, "elapsed_time": "0:27:11", "remaining_time": "0:14:48", "throughput": "478.94", "total_tokens": 781360}
-{"current_steps": 124, "total_steps": 190, "loss": 0.0223, "learning_rate": 1.4831583923105e-06, "epoch": 3.2, "percentage": 65.26, "elapsed_time": "0:27:24", "remaining_time": "0:14:35", "throughput": "479.08", "total_tokens": 787888}
-{"current_steps": 125, "total_steps": 190, "loss": 0.0131, "learning_rate": 1.443454345648252e-06, "epoch": 3.225806451612903, "percentage": 65.79, "elapsed_time": "0:27:37", "remaining_time": "0:14:22", "throughput": "479.02", "total_tokens": 794112}
-{"current_steps": 126, "total_steps": 190, "loss": 0.0008, "learning_rate": 1.4040721330273063e-06, "epoch": 3.2516129032258063, "percentage": 66.32, "elapsed_time": "0:27:50", "remaining_time": "0:14:08", "throughput": "479.00", "total_tokens": 800384}
-{"current_steps": 127, "total_steps": 190, "loss": 0.0058, "learning_rate": 1.3650237506511333e-06, "epoch": 3.2774193548387096, "percentage": 66.84, "elapsed_time": "0:28:04", "remaining_time": "0:13:55", "throughput": "479.10", "total_tokens": 806848}
-{"current_steps": 128, "total_steps": 190, "loss": 0.0065, "learning_rate": 1.3263210930352737e-06, "epoch": 3.303225806451613, "percentage": 67.37, "elapsed_time": "0:28:17", "remaining_time": "0:13:42", "throughput": "479.09", "total_tokens": 813136}
-{"current_steps": 129, "total_steps": 190, "loss": 0.0398, "learning_rate": 1.2879759493841577e-06, "epoch": 3.329032258064516, "percentage": 67.89, "elapsed_time": "0:28:30", "remaining_time": "0:13:28", "throughput": "479.13", "total_tokens": 819504}
-{"current_steps": 130, "total_steps": 190, "loss": 0.0005, "learning_rate": 1.2500000000000007e-06, "epoch": 3.3548387096774195, "percentage": 68.42, "elapsed_time": "0:28:43", "remaining_time": "0:13:15", "throughput": "479.20", "total_tokens": 825936}
-{"current_steps": 131, "total_steps": 190, "loss": 0.0049, "learning_rate": 1.2124048127248644e-06, "epoch": 3.3806451612903228, "percentage": 68.95, "elapsed_time": "0:28:56", "remaining_time": "0:13:02", "throughput": "479.35", "total_tokens": 832496}
-{"current_steps": 132, "total_steps": 190, "loss": 0.0061, "learning_rate": 1.1752018394169882e-06, "epoch": 3.4064516129032256, "percentage": 69.47, "elapsed_time": "0:29:09", "remaining_time": "0:12:48", "throughput": "479.38", "total_tokens": 838864}
-{"current_steps": 133, "total_steps": 190, "loss": 0.0111, "learning_rate": 1.1384024124624324e-06, "epoch": 3.432258064516129, "percentage": 70.0, "elapsed_time": "0:29:23", "remaining_time": "0:12:35", "throughput": "479.56", "total_tokens": 845504}
-{"current_steps": 134, "total_steps": 190, "loss": 0.0049, "learning_rate": 1.1020177413231334e-06, "epoch": 3.458064516129032, "percentage": 70.53, "elapsed_time": "0:29:36", "remaining_time": "0:12:22", "throughput": "479.60", "total_tokens": 851888}
-{"current_steps": 135, "total_steps": 190, "loss": 0.0012, "learning_rate": 1.0660589091223854e-06, "epoch": 3.4838709677419355, "percentage": 71.05, "elapsed_time": "0:29:49", "remaining_time": "0:12:09", "throughput": "479.57", "total_tokens": 858144}
-{"current_steps": 136, "total_steps": 190, "loss": 0.0004, "learning_rate": 1.0305368692688175e-06, "epoch": 3.509677419354839, "percentage": 71.58, "elapsed_time": "0:30:02", "remaining_time": "0:11:55", "throughput": "479.59", "total_tokens": 864496}
-{"current_steps": 137, "total_steps": 190, "loss": 0.0006, "learning_rate": 9.95462442119879e-07, "epoch": 3.535483870967742, "percentage": 72.11, "elapsed_time": "0:30:15", "remaining_time": "0:11:42", "throughput": "479.51", "total_tokens": 870672}
-{"current_steps": 138, "total_steps": 190, "loss": 0.0003, "learning_rate": 9.608463116858544e-07, "epoch": 3.5612903225806454, "percentage": 72.63, "elapsed_time": "0:30:28", "remaining_time": "0:11:29", "throughput": "479.49", "total_tokens": 876944}
-{"current_steps": 139, "total_steps": 190, "loss": 0.0004, "learning_rate": 9.266990223754069e-07, "epoch": 3.587096774193548, "percentage": 73.16, "elapsed_time": "0:30:42", "remaining_time": "0:11:15", "throughput": "479.61", "total_tokens": 883488}
-{"current_steps": 140, "total_steps": 190, "loss": 0.0016, "learning_rate": 8.930309757836517e-07, "epoch": 3.6129032258064515, "percentage": 73.68, "elapsed_time": "0:30:55", "remaining_time": "0:11:02", "throughput": "479.62", "total_tokens": 889824}
-{"current_steps": 141, "total_steps": 190, "loss": 0.0268, "learning_rate": 8.598524275237321e-07, "epoch": 3.638709677419355, "percentage": 74.21, "elapsed_time": "0:31:08", "remaining_time": "0:10:49", "throughput": "479.64", "total_tokens": 896176}
-{"current_steps": 142, "total_steps": 190, "loss": 0.0018, "learning_rate": 8.271734841028553e-07, "epoch": 3.664516129032258, "percentage": 74.74, "elapsed_time": "0:31:21", "remaining_time": "0:10:36", "throughput": "479.52", "total_tokens": 902272}
-{"current_steps": 143, "total_steps": 190, "loss": 0.01, "learning_rate": 7.950040998437541e-07, "epoch": 3.6903225806451614, "percentage": 75.26, "elapsed_time": "0:31:34", "remaining_time": "0:10:22", "throughput": "479.48", "total_tokens": 908512}
-{"current_steps": 144, "total_steps": 190, "loss": 0.0209, "learning_rate": 7.633540738525066e-07, "epoch": 3.7161290322580647, "percentage": 75.79, "elapsed_time": "0:31:47", "remaining_time": "0:10:09", "throughput": "479.66", "total_tokens": 915152}
-{"current_steps": 145, "total_steps": 190, "loss": 0.0076, "learning_rate": 7.322330470336314e-07, "epoch": 3.741935483870968, "percentage": 76.32, "elapsed_time": "0:32:01", "remaining_time": "0:09:56", "throughput": "479.70", "total_tokens": 921552}
-{"current_steps": 146, "total_steps": 190, "loss": 0.0227, "learning_rate": 7.016504991533727e-07, "epoch": 3.767741935483871, "percentage": 76.84, "elapsed_time": "0:32:14", "remaining_time": "0:09:42", "throughput": "479.79", "total_tokens": 928048}
-{"current_steps": 147, "total_steps": 190, "loss": 0.0002, "learning_rate": 6.716157459520739e-07, "epoch": 3.793548387096774, "percentage": 77.37, "elapsed_time": "0:32:27", "remaining_time": "0:09:29", "throughput": "479.87", "total_tokens": 934512}
-{"current_steps": 148, "total_steps": 190, "loss": 0.0296, "learning_rate": 6.421379363065142e-07, "epoch": 3.8193548387096774, "percentage": 77.89, "elapsed_time": "0:32:40", "remaining_time": "0:09:16", "throughput": "479.86", "total_tokens": 940816}
-{"current_steps": 149, "total_steps": 190, "loss": 0.0006, "learning_rate": 6.1322604944307e-07, "epoch": 3.8451612903225807, "percentage": 78.42, "elapsed_time": "0:32:53", "remaining_time": "0:09:03", "throughput": "479.79", "total_tokens": 946992}
-{"current_steps": 150, "total_steps": 190, "loss": 0.0012, "learning_rate": 5.848888922025553e-07, "epoch": 3.870967741935484, "percentage": 78.95, "elapsed_time": "0:33:06", "remaining_time": "0:08:49", "throughput": "479.85", "total_tokens": 953424}
-{"current_steps": 151, "total_steps": 190, "loss": 0.0007, "learning_rate": 5.571350963575728e-07, "epoch": 3.896774193548387, "percentage": 79.47, "elapsed_time": "0:33:20", "remaining_time": "0:08:36", "throughput": "479.79", "total_tokens": 959616}
-{"current_steps": 152, "total_steps": 190, "loss": 0.0003, "learning_rate": 5.299731159831953e-07, "epoch": 3.9225806451612906, "percentage": 80.0, "elapsed_time": "0:33:33", "remaining_time": "0:08:23", "throughput": "479.87", "total_tokens": 966096}
-{"current_steps": 153, "total_steps": 190, "loss": 0.0005, "learning_rate": 5.034112248817685e-07, "epoch": 3.9483870967741934, "percentage": 80.53, "elapsed_time": "0:33:46", "remaining_time": "0:08:10", "throughput": "479.85", "total_tokens": 972368}
-{"current_steps": 154, "total_steps": 190, "loss": 0.0008, "learning_rate": 4.774575140626317e-07, "epoch": 3.9741935483870967, "percentage": 81.05, "elapsed_time": "0:33:59", "remaining_time": "0:07:56", "throughput": "479.92", "total_tokens": 978848}
-{"current_steps": 155, "total_steps": 190, "loss": 0.0003, "learning_rate": 4.5211988927752026e-07, "epoch": 4.0, "percentage": 81.58, "elapsed_time": "0:34:12", "remaining_time": "0:07:43", "throughput": "480.10", "total_tokens": 985520}
-{"current_steps": 156, "total_steps": 190, "loss": 0.0015, "learning_rate": 4.27406068612396e-07, "epoch": 4.025806451612903, "percentage": 82.11, "elapsed_time": "0:34:25", "remaining_time": "0:07:30", "throughput": "480.13", "total_tokens": 991904}
-{"current_steps": 157, "total_steps": 190, "loss": 0.0007, "learning_rate": 4.033235801364402e-07, "epoch": 4.051612903225807, "percentage": 82.63, "elapsed_time": "0:34:39", "remaining_time": "0:07:16", "throughput": "480.16", "total_tokens": 998288}
-{"current_steps": 158, "total_steps": 190, "loss": 0.0002, "learning_rate": 3.798797596089351e-07, "epoch": 4.077419354838709, "percentage": 83.16, "elapsed_time": "0:34:52", "remaining_time": "0:07:03", "throughput": "480.08", "total_tokens": 1004432}
-{"current_steps": 159, "total_steps": 190, "loss": 0.0052, "learning_rate": 3.5708174824471947e-07, "epoch": 4.103225806451613, "percentage": 83.68, "elapsed_time": "0:35:05", "remaining_time": "0:06:50", "throughput": "480.01", "total_tokens": 1010608}
-{"current_steps": 160, "total_steps": 190, "loss": 0.004, "learning_rate": 3.3493649053890325e-07, "epoch": 4.129032258064516, "percentage": 84.21, "elapsed_time": "0:35:18", "remaining_time": "0:06:37", "throughput": "479.97", "total_tokens": 1016848}
-{"current_steps": 161, "total_steps": 190, "loss": 0.0004, "learning_rate": 3.134507321515107e-07, "epoch": 4.15483870967742, "percentage": 84.74, "elapsed_time": "0:35:31", "remaining_time": "0:06:23", "throughput": "480.06", "total_tokens": 1023360}
-{"current_steps": 162, "total_steps": 190, "loss": 0.002, "learning_rate": 2.9263101785268253e-07, "epoch": 4.180645161290323, "percentage": 85.26, "elapsed_time": "0:35:44", "remaining_time": "0:06:10", "throughput": "480.12", "total_tokens": 1029808}
-{"current_steps": 163, "total_steps": 190, "loss": 0.0001, "learning_rate": 2.7248368952908055e-07, "epoch": 4.2064516129032254, "percentage": 85.79, "elapsed_time": "0:35:58", "remaining_time": "0:05:57", "throughput": "480.10", "total_tokens": 1036080}
-{"current_steps": 164, "total_steps": 190, "loss": 0.0001, "learning_rate": 2.53014884252083e-07, "epoch": 4.232258064516129, "percentage": 86.32, "elapsed_time": "0:36:11", "remaining_time": "0:05:44", "throughput": "480.03", "total_tokens": 1042240}
-{"current_steps": 165, "total_steps": 190, "loss": 0.0002, "learning_rate": 2.3423053240837518e-07, "epoch": 4.258064516129032, "percentage": 86.84, "elapsed_time": "0:36:24", "remaining_time": "0:05:30", "throughput": "480.08", "total_tokens": 1048672}
-{"current_steps": 166, "total_steps": 190, "loss": 0.0076, "learning_rate": 2.1613635589349756e-07, "epoch": 4.283870967741936, "percentage": 87.37, "elapsed_time": "0:36:37", "remaining_time": "0:05:17", "throughput": "480.00", "total_tokens": 1054832}
-{"current_steps": 167, "total_steps": 190, "loss": 0.0001, "learning_rate": 1.9873786636889908e-07, "epoch": 4.309677419354839, "percentage": 87.89, "elapsed_time": "0:36:50", "remaining_time": "0:05:04", "throughput": "480.08", "total_tokens": 1061312}
-{"current_steps": 168, "total_steps": 190, "loss": 0.0002, "learning_rate": 1.8204036358303173e-07, "epoch": 4.335483870967742, "percentage": 88.42, "elapsed_time": "0:37:03", "remaining_time": "0:04:51", "throughput": "480.02", "total_tokens": 1067488}
-{"current_steps": 169, "total_steps": 190, "loss": 0.0001, "learning_rate": 1.6604893375699594e-07, "epoch": 4.361290322580645, "percentage": 88.95, "elapsed_time": "0:37:17", "remaining_time": "0:04:37", "throughput": "479.94", "total_tokens": 1073648}
-{"current_steps": 170, "total_steps": 190, "loss": 0.0001, "learning_rate": 1.507684480352292e-07, "epoch": 4.387096774193548, "percentage": 89.47, "elapsed_time": "0:37:30", "remaining_time": "0:04:24", "throughput": "480.03", "total_tokens": 1080160}
-{"current_steps": 171, "total_steps": 190, "loss": 0.0002, "learning_rate": 1.362035610017079e-07, "epoch": 4.412903225806452, "percentage": 90.0, "elapsed_time": "0:37:43", "remaining_time": "0:04:11", "throughput": "480.18", "total_tokens": 1086832}
-{"current_steps": 172, "total_steps": 190, "loss": 0.0001, "learning_rate": 1.223587092621162e-07, "epoch": 4.438709677419355, "percentage": 90.53, "elapsed_time": "0:37:56", "remaining_time": "0:03:58", "throughput": "480.20", "total_tokens": 1093184}
-{"current_steps": 173, "total_steps": 190, "loss": 0.0005, "learning_rate": 1.0923811009241142e-07, "epoch": 4.464516129032258, "percentage": 91.05, "elapsed_time": "0:38:09", "remaining_time": "0:03:45", "throughput": "480.29", "total_tokens": 1099728}
-{"current_steps": 174, "total_steps": 190, "loss": 0.0001, "learning_rate": 9.684576015420277e-08, "epoch": 4.490322580645161, "percentage": 91.58, "elapsed_time": "0:38:22", "remaining_time": "0:03:31", "throughput": "480.29", "total_tokens": 1106032}
-{"current_steps": 175, "total_steps": 190, "loss": 0.0001, "learning_rate": 8.518543427732951e-08, "epoch": 4.516129032258064, "percentage": 92.11, "elapsed_time": "0:38:36", "remaining_time": "0:03:18", "throughput": "480.35", "total_tokens": 1112496}
-{"current_steps": 176, "total_steps": 190, "loss": 0.0081, "learning_rate": 7.426068431000883e-08, "epoch": 4.541935483870968, "percentage": 92.63, "elapsed_time": "0:38:49", "remaining_time": "0:03:05", "throughput": "480.49", "total_tokens": 1119152}
-{"current_steps": 177, "total_steps": 190, "loss": 0.0002, "learning_rate": 6.407483803691216e-08, "epoch": 4.567741935483871, "percentage": 93.16, "elapsed_time": "0:39:02", "remaining_time": "0:02:52", "throughput": "480.44", "total_tokens": 1125360}
-{"current_steps": 178, "total_steps": 190, "loss": 0.0003, "learning_rate": 5.463099816548578e-08, "epoch": 4.593548387096774, "percentage": 93.68, "elapsed_time": "0:39:15", "remaining_time": "0:02:38", "throughput": "480.50", "total_tokens": 1131824}
-{"current_steps": 179, "total_steps": 190, "loss": 0.0001, "learning_rate": 4.593204138084006e-08, "epoch": 4.619354838709677, "percentage": 94.21, "elapsed_time": "0:39:28", "remaining_time": "0:02:25", "throughput": "480.53", "total_tokens": 1138224}
-{"current_steps": 180, "total_steps": 190, "loss": 0.0005, "learning_rate": 3.798061746947995e-08, "epoch": 4.645161290322581, "percentage": 94.74, "elapsed_time": "0:39:41", "remaining_time": "0:02:12", "throughput": "480.53", "total_tokens": 1144528}
-{"current_steps": 181, "total_steps": 190, "loss": 0.0001, "learning_rate": 3.077914851215585e-08, "epoch": 4.670967741935484, "percentage": 95.26, "elapsed_time": "0:39:54", "remaining_time": "0:01:59", "throughput": "480.54", "total_tokens": 1150880}
-{"current_steps": 182, "total_steps": 190, "loss": 0.0002, "learning_rate": 2.4329828146074096e-08, "epoch": 4.6967741935483875, "percentage": 95.79, "elapsed_time": "0:40:08", "remaining_time": "0:01:45", "throughput": "480.53", "total_tokens": 1157184}
-{"current_steps": 183, "total_steps": 190, "loss": 0.0001, "learning_rate": 1.8634620896695044e-08, "epoch": 4.72258064516129, "percentage": 96.32, "elapsed_time": "0:40:21", "remaining_time": "0:01:32", "throughput": "480.54", "total_tokens": 1163536}
-{"current_steps": 184, "total_steps": 190, "loss": 0.0002, "learning_rate": 1.3695261579316776e-08, "epoch": 4.748387096774193, "percentage": 96.84, "elapsed_time": "0:40:34", "remaining_time": "0:01:19", "throughput": "480.55", "total_tokens": 1169888}
-{"current_steps": 185, "total_steps": 190, "loss": 0.0002, "learning_rate": 9.513254770636138e-09, "epoch": 4.774193548387097, "percentage": 97.37, "elapsed_time": "0:40:47", "remaining_time": "0:01:06", "throughput": "480.63", "total_tokens": 1176400}
-{"current_steps": 186, "total_steps": 190, "loss": 0.0002, "learning_rate": 6.089874350439507e-09, "epoch": 4.8, "percentage": 97.89, "elapsed_time": "0:41:00", "remaining_time": "0:00:52", "throughput": "480.58", "total_tokens": 1182608}
-{"current_steps": 187, "total_steps": 190, "loss": 0.0001, "learning_rate": 3.4261631135654174e-09, "epoch": 4.825806451612904, "percentage": 98.42, "elapsed_time": "0:41:13", "remaining_time": "0:00:39", "throughput": "480.61", "total_tokens": 1189008}
-{"current_steps": 188, "total_steps": 190, "loss": 0.0004, "learning_rate": 1.5229324522605949e-09, "epoch": 4.851612903225806, "percentage": 98.95, "elapsed_time": "0:41:27", "remaining_time": "0:00:26", "throughput": "480.59", "total_tokens": 1195280}
-{"current_steps": 189, "total_steps": 190, "loss": 0.0013, "learning_rate": 3.8076210902182607e-10, "epoch": 4.877419354838709, "percentage": 99.47, "elapsed_time": "0:41:40", "remaining_time": "0:00:13", "throughput": "480.53", "total_tokens": 1201456}
-{"current_steps": 190, "total_steps": 190, "loss": 0.0008, "learning_rate": 0.0, "epoch": 4.903225806451613, "percentage": 100.0, "elapsed_time": "0:41:53", "remaining_time": "0:00:00", "throughput": "480.52", "total_tokens": 1207760}
-{"current_steps": 190, "total_steps": 190, "epoch": 4.903225806451613, "percentage": 100.0, "elapsed_time": "0:42:55", "remaining_time": "0:00:00", "throughput": "468.99", "total_tokens": 1207760}
+{"current_steps": 5, "total_steps": 79, "percentage": 6.33, "elapsed_time": "0:00:00", "remaining_time": "0:00:04"}
+{"current_steps": 10, "total_steps": 79, "percentage": 12.66, "elapsed_time": "0:00:00", "remaining_time": "0:00:05"}
+{"current_steps": 15, "total_steps": 79, "percentage": 18.99, "elapsed_time": "0:00:01", "remaining_time": "0:00:04"}
+{"current_steps": 20, "total_steps": 79, "percentage": 25.32, "elapsed_time": "0:00:01", "remaining_time": "0:00:04"}
+{"current_steps": 25, "total_steps": 79, "percentage": 31.65, "elapsed_time": "0:00:01", "remaining_time": "0:00:04"}
+{"current_steps": 30, "total_steps": 79, "percentage": 37.97, "elapsed_time": "0:00:02", "remaining_time": "0:00:03"}
+{"current_steps": 35, "total_steps": 79, "percentage": 44.3, "elapsed_time": "0:00:02", "remaining_time": "0:00:03"}
+{"current_steps": 40, "total_steps": 79, "percentage": 50.63, "elapsed_time": "0:00:03", "remaining_time": "0:00:03"}
+{"current_steps": 45, "total_steps": 79, "percentage": 56.96, "elapsed_time": "0:00:03", "remaining_time": "0:00:02"}
+{"current_steps": 50, "total_steps": 79, "percentage": 63.29, "elapsed_time": "0:00:03", "remaining_time": "0:00:02"}
+{"current_steps": 55, "total_steps": 79, "percentage": 69.62, "elapsed_time": "0:00:04", "remaining_time": "0:00:01"}
+{"current_steps": 60, "total_steps": 79, "percentage": 75.95, "elapsed_time": "0:00:04", "remaining_time": "0:00:01"}
+{"current_steps": 65, "total_steps": 79, "percentage": 82.28, "elapsed_time": "0:00:05", "remaining_time": "0:00:01"}
+{"current_steps": 70, "total_steps": 79, "percentage": 88.61, "elapsed_time": "0:00:05", "remaining_time": "0:00:00"}
+{"current_steps": 75, "total_steps": 79, "percentage": 94.94, "elapsed_time": "0:00:05", "remaining_time": "0:00:00"}
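The learning_rate column in the removed training log is consistent with cosine decay from the 5.0e-06 peak over 190 steps, after the run's 10 warmup steps. A quick numeric check against the logged entries:

```python
import math

def cosine_lr(step, peak=5.0e-06, warmup=10, total=190):
    # Linear warmup followed by cosine decay to zero, matching the
    # lr_scheduler_type: cosine configuration of the removed run.
    if step < warmup:
        return peak * step / warmup
    progress = (step - warmup) / (total - warmup)
    return 0.5 * peak * (1.0 + math.cos(math.pi * progress))

print(cosine_lr(16))   # ~4.9863e-06, matching 4.986304738420684e-06 logged at step 16
print(cosine_lr(190))  # 0.0, matching "learning_rate": 0.0 in the final step
```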
training_args.yaml
CHANGED
@@ -1,30 +1,18 @@
-bf16: true
cutoff_len: 1024
-dataset:
dataset_dir: data
-
-deepspeed: cache/ds_z2_config.json
-do_train: true
finetuning_type: full
flash_attn: auto
-
-include_num_input_tokens_seen: true
-learning_rate: 5.0e-06
-logging_steps: 1
-lr_scheduler_type: cosine
-max_grad_norm: 1.0
max_samples: 100000
-model_name_or_path:
-
-
-
-packing: false
-per_device_train_batch_size: 2
-plot_loss: true
preprocessing_num_workers: 16
quantization_method: bitsandbytes
-report_to: none
-save_steps: 1000
stage: sft
template: llama3
-
cutoff_len: 1024
+dataset: truth_dev_0716_2
dataset_dir: data
+do_predict: true
finetuning_type: full
flash_attn: auto
+max_new_tokens: 512
max_samples: 100000
+model_name_or_path: saves/LLaMA3-8B-Chat/full/train_2024-07-16-15-59-42_llama3_2
+output_dir: saves/LLaMA3-8B-Chat/full/eval_2024-07-16-16-45-32
+per_device_eval_batch_size: 2
+predict_with_generate: true
preprocessing_num_workers: 16
quantization_method: bitsandbytes
stage: sft
+temperature: 0.95
template: llama3
+top_p: 0.7