second commit
- all_results.json +7 -7
- generated_predictions.jsonl +0 -0
- llamaboard_config.yaml +12 -57
- predict_results.json +9 -0
- running_log.txt +86 -544
- trainer_log.jsonl +15 -191
- training_args.yaml +9 -21
all_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
-    "
-    "
-    "
-    "
-    "
-    "
-    "
+    "predict_bleu-4": 88.09424799679488,
+    "predict_rouge-1": 95.67307692307692,
+    "predict_rouge-2": 0.0,
+    "predict_rouge-l": 95.67307692307692,
+    "predict_runtime": 9.3909,
+    "predict_samples_per_second": 132.363,
+    "predict_steps_per_second": 8.306
 }
generated_predictions.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
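Since the predictions file is not rendered here, a minimal sketch for inspecting it locally. It assumes each line is a JSON object with `prompt`, `label`, and `predict` fields, which is the layout LLaMA-Factory's prediction export typically writes; verify the keys against your own file.

```python
import json

# Peek at the first few records of the (unrendered) predictions file.
# The prompt/label/predict field names are assumed, not shown in this diff.
with open("generated_predictions.jsonl", encoding="utf-8") as f:
    for i, line in enumerate(f):
        if i >= 3:
            break
        record = json.loads(line)
        print(f"--- sample {i} ---")
        for key, value in record.items():
            text = value if isinstance(value, str) else json.dumps(value)
            print(f"{key}: {text[:100]}")  # truncate long strings for display
```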
llamaboard_config.yaml
CHANGED
@@ -1,5 +1,16 @@
+eval.batch_size: 2
+eval.cutoff_len: 1024
+eval.dataset:
+- truth_dev_0716
+eval.dataset_dir: data
+eval.max_new_tokens: 512
+eval.max_samples: '100000'
+eval.output_dir: eval_2024-07-23-06-52-5_llama3
+eval.predict: true
+eval.temperature: 0.95
+eval.top_p: 0.7
 top.booster: auto
-top.checkpoint_path:
+top.checkpoint_path: train_2024-07-23-06-00-05_llama3
 top.finetuning_type: full
 top.model_name: LLaMA3-8B-Chat
 top.quantization_bit: none
@@ -7,59 +18,3 @@ top.quantization_method: bitsandbytes
 top.rope_scaling: none
 top.template: llama3
 top.visual_inputs: false
-train.additional_target: ''
-train.badam_mode: layer
-train.badam_switch_interval: 50
-train.badam_switch_mode: ascending
-train.badam_update_ratio: 0.05
-train.batch_size: 2
-train.compute_type: bf16
-train.create_new_adapter: false
-train.cutoff_len: 1024
-train.dataset:
-- truth_train_0716
-train.dataset_dir: data
-train.ds_offload: false
-train.ds_stage: '2'
-train.freeze_extra_modules: ''
-train.freeze_trainable_layers: 2
-train.freeze_trainable_modules: all
-train.galore_rank: 16
-train.galore_scale: 0.25
-train.galore_target: all
-train.galore_update_interval: 200
-train.gradient_accumulation_steps: 8
-train.learning_rate: 5e-6
-train.logging_steps: 1
-train.lora_alpha: 16
-train.lora_dropout: 0
-train.lora_rank: 8
-train.lora_target: ''
-train.loraplus_lr_ratio: 0
-train.lr_scheduler_type: cosine
-train.max_grad_norm: '1.0'
-train.max_samples: '100000'
-train.neat_packing: false
-train.neftune_alpha: 0
-train.num_train_epochs: '5.0'
-train.optim: adamw_torch
-train.packing: false
-train.ppo_score_norm: false
-train.ppo_whiten_rewards: false
-train.pref_beta: 0.1
-train.pref_ftx: 0
-train.pref_loss: sigmoid
-train.report_to: false
-train.resize_vocab: false
-train.reward_model: null
-train.save_steps: 1000
-train.shift_attn: false
-train.training_stage: Supervised Fine-Tuning
-train.use_badam: false
-train.use_dora: false
-train.use_galore: false
-train.use_llama_pro: false
-train.use_pissa: false
-train.use_rslora: false
-train.val_size: 0
-train.warmup_steps: 10
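Read as a whole, this config diff records a mode switch: the WebUI dropped the train.* block from the earlier SFT run and added an eval.* block, pointing top.checkpoint_path at the newly trained model. Since LlamaBoard stores its settings as flat dotted keys, grouping them by prefix makes the file easier to audit; a minimal sketch, assuming PyYAML is installed:

```python
import yaml  # pip install pyyaml

# LlamaBoard stores settings as flat dotted keys ("eval.batch_size: 2").
# Group them by their top-level prefix so the sections read like the WebUI tabs.
with open("llamaboard_config.yaml", encoding="utf-8") as f:
    flat = yaml.safe_load(f)

sections: dict[str, dict] = {}
for dotted_key, value in flat.items():
    section, _, key = dotted_key.partition(".")
    sections.setdefault(section, {})[key] = value

print(sorted(sections))                    # ['eval', 'top'] after this commit
print(sections["top"]["checkpoint_path"])  # train_2024-07-23-06-00-05_llama3
```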
predict_results.json
ADDED
@@ -0,0 +1,9 @@
+{
+    "predict_bleu-4": 88.09424799679488,
+    "predict_rouge-1": 95.67307692307692,
+    "predict_rouge-2": 0.0,
+    "predict_rouge-l": 95.67307692307692,
+    "predict_runtime": 9.3909,
+    "predict_samples_per_second": 132.363,
+    "predict_steps_per_second": 8.306
+}
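One detail worth noting in these metrics (identical in all_results.json above): predict_rouge-2 is exactly 0.0 while predict_rouge-1 and predict_rouge-l are both 95.67 and equal, which is consistent with references that are a single token long; one token yields no bigrams, and unigram overlap then coincides with longest-common-subsequence overlap. To spot-check the scores against the predictions file, a sketch using the rouge-score package; the numbers will not match LLaMA-Factory's own computation exactly (its scoring pipeline tokenizes differently), and the label/predict keys are assumptions:

```python
import json
from rouge_score import rouge_scorer  # pip install rouge-score

# Recompute average ROUGE F1 over the predictions file as a sanity check.
# Field names (label/predict) are assumed, and values will be close to but
# not identical with LLaMA-Factory's reported metrics.
scorer = rouge_scorer.RougeScorer(["rouge1", "rouge2", "rougeL"], use_stemmer=False)
totals = {"rouge1": 0.0, "rouge2": 0.0, "rougeL": 0.0}
count = 0
with open("generated_predictions.jsonl", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        scores = scorer.score(record["label"], record["predict"])
        for name in totals:
            totals[name] += scores[name].fmeasure
        count += 1

for name in totals:
    print(f"{name}: {100 * totals[name] / count:.2f}")
```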
running_log.txt
CHANGED
@@ -1,95 +1,79 @@
-07/23/2024 06:
-07/23/2024 06:
-
-07/23/2024 06:
-07/23/2024 06:
-07
-
-[INFO|tokenization_utils_base.py:
-07/23/2024 06:01
-07/23/2024 06:01
-07/23/2024 06:01
-07/23/2024 06:01
-07/23/2024 06:01
-07/23/2024 06:01
-07/23/2024 06:01
-07/23/2024 06:01
-07
-07
-[
-[INFO|
-[INFO|
-
-
-
-
-07/23/2024 06:01
-07/23/2024 06:01
-07/23/2024 06:01
-07/23/2024 06:01
-07/23/2024 06:
-07/23/2024 06:
-07/23/2024 06:
-07/23/2024 06:
-07/23/2024 06:
-07/23/2024 06:
-07/23/2024 06:
-07
-07
-07/23/2024 06:01:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train.json...
-07/23/2024 06:01:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train.json...
-07/23/2024 06:01:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train.json...
-07/23/2024 06:01:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train.json...
-07/23/2024 06:01:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train.json...
-07/23/2024 06:01:19 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_train.json...
-[INFO|configuration_utils.py:733] 2024-07-23 06:01:20,503 >> loading configuration file config.json from cache at /root/.cache/huggingface/hub/models--meta-llama--Meta-Llama-3-8B-Instruct/snapshots/e1945c40cd546c78e41f1151f4db032b271faeaa/config.json
-[INFO|configuration_utils.py:800] 2024-07-23 06:01:20,505 >> Model config LlamaConfig {
-  "_name_or_path": "meta-llama/Meta-Llama-3-8B-Instruct",
+07/23/2024 06:56:00 - INFO - llamafactory.hparams.parser - Process rank: 5, device: cuda:5, n_gpu: 1, distributed training: True, compute dtype: None
+07/23/2024 06:56:00 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+07/23/2024 06:56:00 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
+07/23/2024 06:56:00 - INFO - llamafactory.hparams.parser - Process rank: 3, device: cuda:3, n_gpu: 1, distributed training: True, compute dtype: None
+07/23/2024 06:56:00 - INFO - llamafactory.hparams.parser - Process rank: 4, device: cuda:4, n_gpu: 1, distributed training: True, compute dtype: None
+[INFO|parser.py:325] 2024-07-23 06:56:00,990 >> Process rank: 0, device: cuda:0, n_gpu: 1, distributed training: True, compute dtype: None
+[INFO|tokenization_utils_base.py:2159] 2024-07-23 06:56:00,992 >> loading file tokenizer.json
+[INFO|tokenization_utils_base.py:2159] 2024-07-23 06:56:00,992 >> loading file added_tokens.json
+07/23/2024 06:56:01 - INFO - llamafactory.hparams.parser - Process rank: 6, device: cuda:6, n_gpu: 1, distributed training: True, compute dtype: None
+07/23/2024 06:56:01 - INFO - llamafactory.hparams.parser - Process rank: 7, device: cuda:7, n_gpu: 1, distributed training: True, compute dtype: None
+07/23/2024 06:56:01 - INFO - llamafactory.hparams.parser - Process rank: 1, device: cuda:1, n_gpu: 1, distributed training: True, compute dtype: None
+07/23/2024 06:56:01 - INFO - llamafactory.hparams.parser - Process rank: 2, device: cuda:2, n_gpu: 1, distributed training: True, compute dtype: None
+07/23/2024 06:56:01 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+07/23/2024 06:56:01 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
+07/23/2024 06:56:01 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+07/23/2024 06:56:01 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
+[INFO|tokenization_utils_base.py:2159] 2024-07-23 06:56:00,992 >> loading file special_tokens_map.json
+[INFO|tokenization_utils_base.py:2159] 2024-07-23 06:56:00,992 >> loading file tokenizer_config.json
+[WARNING|logging.py:313] 2024-07-23 06:56:01,268 >> Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+[INFO|template.py:270] 2024-07-23 06:56:01,269 >> Replace eos token: <|eot_id|>
+[INFO|loader.py:50] 2024-07-23 06:56:01,269 >> Loading dataset 0716_truthfulqa_benchmark_test.json...
+07/23/2024 06:56:01 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+07/23/2024 06:56:01 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
+07/23/2024 06:56:01 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+07/23/2024 06:56:01 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
+07/23/2024 06:56:01 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+07/23/2024 06:56:01 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
+07/23/2024 06:56:01 - WARNING - transformers.tokenization_utils_base - Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
+07/23/2024 06:56:01 - INFO - llamafactory.data.template - Replace eos token: <|eot_id|>
+07/23/2024 06:56:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test.json...
+07/23/2024 06:56:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test.json...
+07/23/2024 06:56:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test.json...
+07/23/2024 06:56:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test.json...
+07/23/2024 06:56:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test.json...
+07/23/2024 06:56:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test.json...
+07/23/2024 06:56:02 - INFO - llamafactory.data.loader - Loading dataset 0716_truthfulqa_benchmark_test.json...
+[INFO|configuration_utils.py:731] 2024-07-23 06:56:05,969 >> loading configuration file saves/LLaMA3-8B-Chat/full/train_2024-07-23-06-00-05_llama3/config.json
+[INFO|configuration_utils.py:800] 2024-07-23 06:56:05,970 >> Model config LlamaConfig {
+  "_name_or_path": "saves/LLaMA3-8B-Chat/full/train_2024-07-23-06-00-05_llama3",
   "architectures": [
     "LlamaForCausalLM"
   ],
@@ -114,48 +98,46 @@
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.42.3",
-  "use_cache":
+  "use_cache": false,
   "vocab_size": 128256
 }
 
-[INFO|
-[INFO|modeling_utils.py:
-[INFO|
+[INFO|patcher.py:81] 2024-07-23 06:56:05,971 >> Using KV cache for faster generation.
+[INFO|modeling_utils.py:3553] 2024-07-23 06:56:05,998 >> loading weights file saves/LLaMA3-8B-Chat/full/train_2024-07-23-06-00-05_llama3/model.safetensors.index.json
+[INFO|modeling_utils.py:1531] 2024-07-23 06:56:05,999 >> Instantiating LlamaForCausalLM model under default dtype torch.bfloat16.
+[INFO|configuration_utils.py:1000] 2024-07-23 06:56:06,002 >> Generate config GenerationConfig {
   "bos_token_id": 128000,
   "eos_token_id": 128009
 }
 
-
-
-
-[INFO|modeling_utils.py:4372] 2024-07-23 06:01:24,382 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at meta-llama/Meta-Llama-3-8B-Instruct.
-If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
-07/23/2024 06:
-07/23/2024 06:
-07/23/2024 06:
-07/23/2024 06:
-07/23/2024 06:
-07/23/2024 06:
-07
-07/23/2024 06:01:24 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07
-[INFO|configuration_utils.py:
-[INFO|configuration_utils.py:1000] 2024-07-23 06:
+07/23/2024 06:56:06 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/23/2024 06:56:06 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/23/2024 06:56:06 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/23/2024 06:56:06 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/23/2024 06:56:06 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/23/2024 06:56:06 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+07/23/2024 06:56:06 - INFO - llamafactory.model.patcher - Using KV cache for faster generation.
+[INFO|modeling_utils.py:4364] 2024-07-23 06:56:10,304 >> All model checkpoint weights were used when initializing LlamaForCausalLM.
+[INFO|modeling_utils.py:4372] 2024-07-23 06:56:10,304 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at saves/LLaMA3-8B-Chat/full/train_2024-07-23-06-00-05_llama3.
+If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
+[INFO|configuration_utils.py:953] 2024-07-23 06:56:10,308 >> loading configuration file saves/LLaMA3-8B-Chat/full/train_2024-07-23-06-00-05_llama3/generation_config.json
+[INFO|configuration_utils.py:1000] 2024-07-23 06:56:10,308 >> Generate config GenerationConfig {
   "bos_token_id": 128000,
   "do_sample": true,
   "eos_token_id": [
@@ -168,500 +150,60 @@ If your task is similar to the task the model of the checkpoint was trained on,
 }
 
-07
-[INFO|checkpointing.py:103] 2024-07-23 06:01:24,565 >> Gradient checkpointing enabled.
-[INFO|attention.py:80] 2024-07-23 06:01:24,565 >> Using torch SDPA for faster training and inference.
-[INFO|adapter.py:302] 2024-07-23 06:01:24,565 >> Upcasting trainable params to float32.
-[INFO|adapter.py:48] 2024-07-23 06:01:24,565 >> Fine-tuning method: Full
-[INFO|loader.py:196] 2024-07-23 06:01:24,607 >> trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
-07/23/2024 06:01:24 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/23/2024 06:01:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/23/2024 06:01:24 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/23/2024 06:01:24 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/23/2024 06:01:24 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
-07/23/2024 06:01:24 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/23/2024 06:01:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/23/2024 06:01:24 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/23/2024 06:01:24 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/23/2024 06:01:24 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
-07/23/2024 06:01:24 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/23/2024 06:01:24 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/23/2024 06:01:24 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/23/2024 06:01:24 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/23/2024 06:01:24 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
-[INFO|trainer.py:642] 2024-07-23 06:01:24,612 >> Using auto half precision backend
-07/23/2024 06:01:25 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/23/2024 06:01:25 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/23/2024 06:01:25 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/23/2024 06:01:25 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/23/2024 06:01:25 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
-07/23/2024 06:01:25 - INFO - llamafactory.model.model_utils.checkpointing - Gradient checkpointing enabled.
-07/23/2024 06:01:25 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
-07/23/2024 06:01:25 - INFO - llamafactory.model.adapter - Upcasting trainable params to float32.
-07/23/2024 06:01:25 - INFO - llamafactory.model.adapter - Fine-tuning method: Full
-07/23/2024 06:01:25 - INFO - llamafactory.model.loader - trainable params: 8,030,261,248 || all params: 8,030,261,248 || trainable%: 100.0000
-[INFO|trainer.py:2128] 2024-07-23 06:01:47,813 >> ***** Running training *****
-[INFO|trainer.py:2129] 2024-07-23 06:01:47,813 >> Num examples = 4,968
-[INFO|trainer.py:2130] 2024-07-23 06:01:47,814 >> Num Epochs = 5
-[INFO|trainer.py:2131] 2024-07-23 06:01:47,814 >> Instantaneous batch size per device = 2
-[INFO|trainer.py:2134] 2024-07-23 06:01:47,814 >> Total train batch size (w. parallel, distributed & accumulation) = 128
-[INFO|trainer.py:2135] 2024-07-23 06:01:47,814 >> Gradient Accumulation steps = 8
-[INFO|trainer.py:2136] 2024-07-23 06:01:47,814 >> Total optimization steps = 190
-[INFO|trainer.py:2137] 2024-07-23 06:01:47,815 >> Number of trainable parameters = 8,030,261,248
-[INFO|callbacks.py:310] 2024-07-23 06:02:03,431 >> {'loss': 14.1364, 'learning_rate': 5.0000e-07, 'epoch': 0.03, 'throughput': 417.05}
-[INFO|callbacks.py:310] 2024-07-23 06:02:16,666 >> {'loss': 13.7804, 'learning_rate': 1.0000e-06, 'epoch': 0.05, 'throughput': 445.34}
-[INFO|callbacks.py:310] 2024-07-23 06:02:29,887 >> {'loss': 13.4871, 'learning_rate': 1.5000e-06, 'epoch': 0.08, 'throughput': 457.52}
-[INFO|callbacks.py:310] 2024-07-23 06:02:43,112 >> {'loss': 12.7900, 'learning_rate': 2.0000e-06, 'epoch': 0.10, 'throughput': 458.34}
-[INFO|callbacks.py:310] 2024-07-23 06:02:56,320 >> {'loss': 9.2748, 'learning_rate': 2.5000e-06, 'epoch': 0.13, 'throughput': 466.66}
-[INFO|callbacks.py:310] 2024-07-23 06:03:09,521 >> {'loss': 6.5585, 'learning_rate': 3.0000e-06, 'epoch': 0.15, 'throughput': 467.05}
-[INFO|callbacks.py:310] 2024-07-23 06:03:22,727 >> {'loss': 5.3984, 'learning_rate': 3.5000e-06, 'epoch': 0.18, 'throughput': 465.11}
-[INFO|callbacks.py:310] 2024-07-23 06:03:35,927 >> {'loss': 1.9363, 'learning_rate': 4.0000e-06, 'epoch': 0.21, 'throughput': 466.48}
-[INFO|callbacks.py:310] 2024-07-23 06:03:49,137 >> {'loss': 0.6783, 'learning_rate': 4.5000e-06, 'epoch': 0.23, 'throughput': 469.24}
-[INFO|callbacks.py:310] 2024-07-23 06:04:02,342 >> {'loss': 2.9945, 'learning_rate': 5.0000e-06, 'epoch': 0.26, 'throughput': 470.87}
-[INFO|callbacks.py:310] 2024-07-23 06:04:15,544 >> {'loss': 0.2916, 'learning_rate': 4.9996e-06, 'epoch': 0.28, 'throughput': 470.92}
-[INFO|callbacks.py:310] 2024-07-23 06:04:28,766 >> {'loss': 2.2775, 'learning_rate': 4.9985e-06, 'epoch': 0.31, 'throughput': 471.10}
-[INFO|callbacks.py:310] 2024-07-23 06:04:41,974 >> {'loss': 0.3757, 'learning_rate': 4.9966e-06, 'epoch': 0.33, 'throughput': 471.76}
-[INFO|callbacks.py:310] 2024-07-23 06:04:55,162 >> {'loss': 1.9543, 'learning_rate': 4.9939e-06, 'epoch': 0.36, 'throughput': 473.05}
-[INFO|callbacks.py:310] 2024-07-23 06:05:08,380 >> {'loss': 0.7398, 'learning_rate': 4.9905e-06, 'epoch': 0.39, 'throughput': 472.67}
-[INFO|callbacks.py:310] 2024-07-23 06:05:21,579 >> {'loss': 1.1868, 'learning_rate': 4.9863e-06, 'epoch': 0.41, 'throughput': 474.17}
-[INFO|callbacks.py:310] 2024-07-23 06:05:34,789 >> {'loss': 0.5418, 'learning_rate': 4.9814e-06, 'epoch': 0.44, 'throughput': 473.57}
-[INFO|callbacks.py:310] 2024-07-23 06:05:47,979 >> {'loss': 0.2263, 'learning_rate': 4.9757e-06, 'epoch': 0.46, 'throughput': 474.35}
-[INFO|callbacks.py:310] 2024-07-23 06:06:01,181 >> {'loss': 0.1612, 'learning_rate': 4.9692e-06, 'epoch': 0.49, 'throughput': 475.02}
-[INFO|callbacks.py:310] 2024-07-23 06:06:14,390 >> {'loss': 0.3299, 'learning_rate': 4.9620e-06, 'epoch': 0.51, 'throughput': 475.97}
-[INFO|callbacks.py:310] 2024-07-23 06:06:27,596 >> {'loss': 0.2013, 'learning_rate': 4.9541e-06, 'epoch': 0.54, 'throughput': 476.60}
-[INFO|callbacks.py:310] 2024-07-23 06:06:40,782 >> {'loss': 0.2446, 'learning_rate': 4.9454e-06, 'epoch': 0.57, 'throughput': 477.00}
-[INFO|callbacks.py:310] 2024-07-23 06:06:53,988 >> {'loss': 0.2235, 'learning_rate': 4.9359e-06, 'epoch': 0.59, 'throughput': 477.33}
-[INFO|callbacks.py:310] 2024-07-23 06:07:07,195 >> {'loss': 0.1160, 'learning_rate': 4.9257e-06, 'epoch': 0.62, 'throughput': 476.68}
-[INFO|callbacks.py:310] 2024-07-23 06:07:20,384 >> {'loss': 0.2179, 'learning_rate': 4.9148e-06, 'epoch': 0.64, 'throughput': 476.63}
-[INFO|callbacks.py:310] 2024-07-23 06:07:33,580 >> {'loss': 0.1414, 'learning_rate': 4.9032e-06, 'epoch': 0.67, 'throughput': 476.58}
-[INFO|callbacks.py:310] 2024-07-23 06:07:46,768 >> {'loss': 0.1181, 'learning_rate': 4.8908e-06, 'epoch': 0.69, 'throughput': 477.26}
-[INFO|callbacks.py:310] 2024-07-23 06:07:59,974 >> {'loss': 0.2753, 'learning_rate': 4.8776e-06, 'epoch': 0.72, 'throughput': 477.91}
-[INFO|callbacks.py:310] 2024-07-23 06:08:13,168 >> {'loss': 0.3255, 'learning_rate': 4.8638e-06, 'epoch': 0.75, 'throughput': 478.44}
-[INFO|callbacks.py:310] 2024-07-23 06:08:26,364 >> {'loss': 0.2352, 'learning_rate': 4.8492e-06, 'epoch': 0.77, 'throughput': 479.06}
-[INFO|callbacks.py:310] 2024-07-23 06:08:39,564 >> {'loss': 0.0630, 'learning_rate': 4.8340e-06, 'epoch': 0.80, 'throughput': 479.01}
-[INFO|callbacks.py:310] 2024-07-23 06:08:52,759 >> {'loss': 0.2042, 'learning_rate': 4.8180e-06, 'epoch': 0.82, 'throughput': 479.09}
-[INFO|callbacks.py:310] 2024-07-23 06:09:05,957 >> {'loss': 0.1364, 'learning_rate': 4.8013e-06, 'epoch': 0.85, 'throughput': 479.59}
-[INFO|callbacks.py:310] 2024-07-23 06:09:19,151 >> {'loss': 0.0934, 'learning_rate': 4.7839e-06, 'epoch': 0.87, 'throughput': 479.71}
-[INFO|callbacks.py:310] 2024-07-23 06:09:32,336 >> {'loss': 0.1332, 'learning_rate': 4.7658e-06, 'epoch': 0.90, 'throughput': 479.91}
-[INFO|callbacks.py:310] 2024-07-23 06:09:45,527 >> {'loss': 0.1595, 'learning_rate': 4.7470e-06, 'epoch': 0.93, 'throughput': 479.86}
-[INFO|callbacks.py:310] 2024-07-23 06:09:58,740 >> {'loss': 0.1528, 'learning_rate': 4.7275e-06, 'epoch': 0.95, 'throughput': 480.07}
-[INFO|callbacks.py:310] 2024-07-23 06:10:11,945 >> {'loss': 0.1342, 'learning_rate': 4.7074e-06, 'epoch': 0.98, 'throughput': 480.42}
-[INFO|callbacks.py:310] 2024-07-23 06:10:25,143 >> {'loss': 0.1586, 'learning_rate': 4.6865e-06, 'epoch': 1.00, 'throughput': 480.69}
-[INFO|callbacks.py:310] 2024-07-23 06:10:38,337 >> {'loss': 0.1072, 'learning_rate': 4.6651e-06, 'epoch': 1.03, 'throughput': 480.80}
-[INFO|callbacks.py:310] 2024-07-23 06:10:51,540 >> {'loss': 0.0357, 'learning_rate': 4.6429e-06, 'epoch': 1.05, 'throughput': 481.10}
-[INFO|callbacks.py:310] 2024-07-23 06:11:04,729 >> {'loss': 0.0600, 'learning_rate': 4.6201e-06, 'epoch': 1.08, 'throughput': 481.28}
-[INFO|callbacks.py:310] 2024-07-23 06:11:17,926 >> {'loss': 0.0902, 'learning_rate': 4.5967e-06, 'epoch': 1.11, 'throughput': 481.59}
-[INFO|callbacks.py:310] 2024-07-23 06:11:31,117 >> {'loss': 0.0202, 'learning_rate': 4.5726e-06, 'epoch': 1.13, 'throughput': 481.67}
-[INFO|callbacks.py:310] 2024-07-23 06:11:44,319 >> {'loss': 0.0380, 'learning_rate': 4.5479e-06, 'epoch': 1.16, 'throughput': 481.31}
-[INFO|callbacks.py:310] 2024-07-23 06:11:57,513 >> {'loss': 0.0379, 'learning_rate': 4.5225e-06, 'epoch': 1.18, 'throughput': 481.42}
-[INFO|callbacks.py:310] 2024-07-23 06:12:10,721 >> {'loss': 0.0742, 'learning_rate': 4.4966e-06, 'epoch': 1.21, 'throughput': 481.51}
-[INFO|callbacks.py:310] 2024-07-23 06:12:23,931 >> {'loss': 0.0658, 'learning_rate': 4.4700e-06, 'epoch': 1.23, 'throughput': 480.94}
-[INFO|callbacks.py:310] 2024-07-23 06:12:37,136 >> {'loss': 0.0336, 'learning_rate': 4.4429e-06, 'epoch': 1.26, 'throughput': 481.32}
-[INFO|callbacks.py:310] 2024-07-23 06:12:50,333 >> {'loss': 0.1021, 'learning_rate': 4.4151e-06, 'epoch': 1.29, 'throughput': 481.12}
-[INFO|callbacks.py:310] 2024-07-23 06:13:03,531 >> {'loss': 0.1312, 'learning_rate': 4.3868e-06, 'epoch': 1.31, 'throughput': 481.10}
-[INFO|callbacks.py:310] 2024-07-23 06:13:16,740 >> {'loss': 0.0665, 'learning_rate': 4.3579e-06, 'epoch': 1.34, 'throughput': 481.08}
-[INFO|callbacks.py:310] 2024-07-23 06:13:29,940 >> {'loss': 0.0679, 'learning_rate': 4.3284e-06, 'epoch': 1.36, 'throughput': 481.08}
-[INFO|callbacks.py:310] 2024-07-23 06:13:43,136 >> {'loss': 0.0579, 'learning_rate': 4.2983e-06, 'epoch': 1.39, 'throughput': 480.90}
-[INFO|callbacks.py:310] 2024-07-23 06:13:56,338 >> {'loss': 0.0542, 'learning_rate': 4.2678e-06, 'epoch': 1.41, 'throughput': 481.15}
-[INFO|callbacks.py:310] 2024-07-23 06:14:09,544 >> {'loss': 0.0476, 'learning_rate': 4.2366e-06, 'epoch': 1.44, 'throughput': 481.17}
-[INFO|callbacks.py:310] 2024-07-23 06:14:22,733 >> {'loss': 0.0613, 'learning_rate': 4.2050e-06, 'epoch': 1.47, 'throughput': 481.35}
-[INFO|callbacks.py:310] 2024-07-23 06:14:35,924 >> {'loss': 0.0995, 'learning_rate': 4.1728e-06, 'epoch': 1.49, 'throughput': 481.29}
-[INFO|callbacks.py:310] 2024-07-23 06:14:49,119 >> {'loss': 0.0532, 'learning_rate': 4.1401e-06, 'epoch': 1.52, 'throughput': 481.12}
-[INFO|callbacks.py:310] 2024-07-23 06:15:02,320 >> {'loss': 0.0824, 'learning_rate': 4.1070e-06, 'epoch': 1.54, 'throughput': 481.19}
-[INFO|callbacks.py:310] 2024-07-23 06:15:15,504 >> {'loss': 0.0499, 'learning_rate': 4.0733e-06, 'epoch': 1.57, 'throughput': 481.24}
-[INFO|callbacks.py:310] 2024-07-23 06:15:28,693 >> {'loss': 0.0413, 'learning_rate': 4.0392e-06, 'epoch': 1.59, 'throughput': 481.38}
-[INFO|callbacks.py:310] 2024-07-23 06:15:41,892 >> {'loss': 0.0637, 'learning_rate': 4.0045e-06, 'epoch': 1.62, 'throughput': 481.64}
-[INFO|callbacks.py:310] 2024-07-23 06:15:55,095 >> {'loss': 0.0529, 'learning_rate': 3.9695e-06, 'epoch': 1.65, 'throughput': 481.43}
-[INFO|callbacks.py:310] 2024-07-23 06:16:08,283 >> {'loss': 0.0474, 'learning_rate': 3.9339e-06, 'epoch': 1.67, 'throughput': 481.41}
-[INFO|callbacks.py:310] 2024-07-23 06:16:21,469 >> {'loss': 0.0649, 'learning_rate': 3.8980e-06, 'epoch': 1.70, 'throughput': 481.25}
-[INFO|callbacks.py:310] 2024-07-23 06:16:34,678 >> {'loss': 0.0505, 'learning_rate': 3.8616e-06, 'epoch': 1.72, 'throughput': 481.23}
-[INFO|callbacks.py:310] 2024-07-23 06:16:47,877 >> {'loss': 0.0621, 'learning_rate': 3.8248e-06, 'epoch': 1.75, 'throughput': 480.87}
-[INFO|callbacks.py:310] 2024-07-23 06:17:01,073 >> {'loss': 0.0769, 'learning_rate': 3.7876e-06, 'epoch': 1.77, 'throughput': 480.95}
-[INFO|callbacks.py:310] 2024-07-23 06:17:14,279 >> {'loss': 0.0435, 'learning_rate': 3.7500e-06, 'epoch': 1.80, 'throughput': 481.18}
-[INFO|callbacks.py:310] 2024-07-23 06:17:27,460 >> {'loss': 0.0673, 'learning_rate': 3.7120e-06, 'epoch': 1.83, 'throughput': 481.46}
-[INFO|callbacks.py:310] 2024-07-23 06:17:40,664 >> {'loss': 0.1316, 'learning_rate': 3.6737e-06, 'epoch': 1.85, 'throughput': 481.22}
-[INFO|callbacks.py:310] 2024-07-23 06:17:53,859 >> {'loss': 0.0531, 'learning_rate': 3.6350e-06, 'epoch': 1.88, 'throughput': 481.32}
-[INFO|callbacks.py:310] 2024-07-23 06:18:07,061 >> {'loss': 0.0287, 'learning_rate': 3.5959e-06, 'epoch': 1.90, 'throughput': 481.43}
-[INFO|callbacks.py:310] 2024-07-23 06:18:20,271 >> {'loss': 0.0648, 'learning_rate': 3.5565e-06, 'epoch': 1.93, 'throughput': 481.41}
-[INFO|callbacks.py:310] 2024-07-23 06:18:33,470 >> {'loss': 0.1211, 'learning_rate': 3.5168e-06, 'epoch': 1.95, 'throughput': 481.37}
-[INFO|callbacks.py:310] 2024-07-23 06:18:46,678 >> {'loss': 0.0879, 'learning_rate': 3.4768e-06, 'epoch': 1.98, 'throughput': 481.10}
-[INFO|callbacks.py:310] 2024-07-23 06:18:59,891 >> {'loss': 0.0227, 'learning_rate': 3.4365e-06, 'epoch': 2.01, 'throughput': 481.24}
-[INFO|callbacks.py:310] 2024-07-23 06:19:13,093 >> {'loss': 0.0228, 'learning_rate': 3.3959e-06, 'epoch': 2.03, 'throughput': 481.30}
-[INFO|callbacks.py:310] 2024-07-23 06:19:26,288 >> {'loss': 0.0360, 'learning_rate': 3.3551e-06, 'epoch': 2.06, 'throughput': 481.33}
-[INFO|callbacks.py:310] 2024-07-23 06:19:39,500 >> {'loss': 0.0138, 'learning_rate': 3.3139e-06, 'epoch': 2.08, 'throughput': 481.23}
-[INFO|callbacks.py:310] 2024-07-23 06:19:52,712 >> {'loss': 0.0697, 'learning_rate': 3.2725e-06, 'epoch': 2.11, 'throughput': 481.06}
-[INFO|callbacks.py:310] 2024-07-23 06:20:05,915 >> {'loss': 0.0508, 'learning_rate': 3.2309e-06, 'epoch': 2.14, 'throughput': 480.79}
-[INFO|callbacks.py:310] 2024-07-23 06:20:19,109 >> {'loss': 0.0088, 'learning_rate': 3.1891e-06, 'epoch': 2.16, 'throughput': 481.08}
-[INFO|callbacks.py:310] 2024-07-23 06:20:32,308 >> {'loss': 0.0158, 'learning_rate': 3.1470e-06, 'epoch': 2.19, 'throughput': 481.26}
-[INFO|callbacks.py:310] 2024-07-23 06:20:45,510 >> {'loss': 0.0060, 'learning_rate': 3.1048e-06, 'epoch': 2.21, 'throughput': 481.23}
-[INFO|callbacks.py:310] 2024-07-23 06:20:58,705 >> {'loss': 0.0380, 'learning_rate': 3.0624e-06, 'epoch': 2.24, 'throughput': 481.60}
-[INFO|callbacks.py:310] 2024-07-23 06:21:11,910 >> {'loss': 0.0004, 'learning_rate': 3.0198e-06, 'epoch': 2.26, 'throughput': 481.51}
-[INFO|callbacks.py:310] 2024-07-23 06:21:25,109 >> {'loss': 0.0111, 'learning_rate': 2.9770e-06, 'epoch': 2.29, 'throughput': 481.43}
-[INFO|callbacks.py:310] 2024-07-23 06:21:38,303 >> {'loss': 0.0008, 'learning_rate': 2.9341e-06, 'epoch': 2.32, 'throughput': 481.60}
-[INFO|callbacks.py:310] 2024-07-23 06:21:51,491 >> {'loss': 0.0182, 'learning_rate': 2.8911e-06, 'epoch': 2.34, 'throughput': 481.67}
-[INFO|callbacks.py:310] 2024-07-23 06:22:04,691 >> {'loss': 0.0491, 'learning_rate': 2.8479e-06, 'epoch': 2.37, 'throughput': 481.64}
-[INFO|callbacks.py:310] 2024-07-23 06:22:17,877 >> {'loss': 0.0040, 'learning_rate': 2.8047e-06, 'epoch': 2.39, 'throughput': 481.71}
-[INFO|callbacks.py:310] 2024-07-23 06:22:31,075 >> {'loss': 0.0176, 'learning_rate': 2.7613e-06, 'epoch': 2.42, 'throughput': 481.77}
-[INFO|callbacks.py:310] 2024-07-23 06:22:44,264 >> {'loss': 0.0190, 'learning_rate': 2.7179e-06, 'epoch': 2.44, 'throughput': 481.70}
-[INFO|callbacks.py:310] 2024-07-23 06:22:57,479 >> {'loss': 0.0270, 'learning_rate': 2.6744e-06, 'epoch': 2.47, 'throughput': 481.50}
-[INFO|callbacks.py:310] 2024-07-23 06:23:10,686 >> {'loss': 0.0354, 'learning_rate': 2.6308e-06, 'epoch': 2.50, 'throughput': 481.51}
-[INFO|callbacks.py:310] 2024-07-23 06:23:23,876 >> {'loss': 0.0741, 'learning_rate': 2.5872e-06, 'epoch': 2.52, 'throughput': 481.62}
-[INFO|callbacks.py:310] 2024-07-23 06:23:37,082 >> {'loss': 0.0582, 'learning_rate': 2.5436e-06, 'epoch': 2.55, 'throughput': 481.56}
-[INFO|callbacks.py:310] 2024-07-23 06:23:50,275 >> {'loss': 0.0096, 'learning_rate': 2.5000e-06, 'epoch': 2.57, 'throughput': 481.71}
-[INFO|callbacks.py:310] 2024-07-23 06:24:03,470 >> {'loss': 0.0263, 'learning_rate': 2.4564e-06, 'epoch': 2.60, 'throughput': 481.77}
-[INFO|callbacks.py:310] 2024-07-23 06:24:16,658 >> {'loss': 0.0121, 'learning_rate': 2.4128e-06, 'epoch': 2.62, 'throughput': 481.72}
-[INFO|callbacks.py:310] 2024-07-23 06:24:29,856 >> {'loss': 0.0204, 'learning_rate': 2.3692e-06, 'epoch': 2.65, 'throughput': 481.70}
-[INFO|callbacks.py:310] 2024-07-23 06:24:43,053 >> {'loss': 0.0325, 'learning_rate': 2.3256e-06, 'epoch': 2.68, 'throughput': 481.83}
-[INFO|callbacks.py:310] 2024-07-23 06:24:56,246 >> {'loss': 0.0076, 'learning_rate': 2.2821e-06, 'epoch': 2.70, 'throughput': 481.67}
-[INFO|callbacks.py:310] 2024-07-23 06:25:09,456 >> {'loss': 0.0485, 'learning_rate': 2.2387e-06, 'epoch': 2.73, 'throughput': 481.57}
-[INFO|callbacks.py:310] 2024-07-23 06:25:22,661 >> {'loss': 0.0070, 'learning_rate': 2.1953e-06, 'epoch': 2.75, 'throughput': 481.41}
-[INFO|callbacks.py:310] 2024-07-23 06:25:35,871 >> {'loss': 0.0347, 'learning_rate': 2.1521e-06, 'epoch': 2.78, 'throughput': 481.34}
-[INFO|callbacks.py:310] 2024-07-23 06:25:49,061 >> {'loss': 0.0142, 'learning_rate': 2.1089e-06, 'epoch': 2.80, 'throughput': 481.52}
-[INFO|callbacks.py:310] 2024-07-23 06:26:02,269 >> {'loss': 0.0414, 'learning_rate': 2.0659e-06, 'epoch': 2.83, 'throughput': 481.52}
-[INFO|callbacks.py:310] 2024-07-23 06:26:15,475 >> {'loss': 0.0419, 'learning_rate': 2.0230e-06, 'epoch': 2.86, 'throughput': 481.56}
-[INFO|callbacks.py:310] 2024-07-23 06:26:28,670 >> {'loss': 0.0430, 'learning_rate': 1.9802e-06, 'epoch': 2.88, 'throughput': 481.65}
-[INFO|callbacks.py:310] 2024-07-23 06:26:41,866 >> {'loss': 0.0192, 'learning_rate': 1.9376e-06, 'epoch': 2.91, 'throughput': 481.62}
-[INFO|callbacks.py:310] 2024-07-23 06:26:55,067 >> {'loss': 0.0427, 'learning_rate': 1.8952e-06, 'epoch': 2.93, 'throughput': 481.66}
-[INFO|callbacks.py:310] 2024-07-23 06:27:08,281 >> {'loss': 0.0116, 'learning_rate': 1.8530e-06, 'epoch': 2.96, 'throughput': 481.58}
-[INFO|callbacks.py:310] 2024-07-23 06:27:21,473 >> {'loss': 0.0135, 'learning_rate': 1.8109e-06, 'epoch': 2.98, 'throughput': 481.53}
-[INFO|callbacks.py:310] 2024-07-23 06:27:34,671 >> {'loss': 0.0128, 'learning_rate': 1.7691e-06, 'epoch': 3.01, 'throughput': 481.54}
-[INFO|callbacks.py:310] 2024-07-23 06:27:47,872 >> {'loss': 0.0021, 'learning_rate': 1.7275e-06, 'epoch': 3.04, 'throughput': 481.65}
-[INFO|callbacks.py:310] 2024-07-23 06:28:01,075 >> {'loss': 0.0057, 'learning_rate': 1.6861e-06, 'epoch': 3.06, 'throughput': 481.57}
-[INFO|callbacks.py:310] 2024-07-23 06:28:14,261 >> {'loss': 0.0197, 'learning_rate': 1.6449e-06, 'epoch': 3.09, 'throughput': 481.54}
-[INFO|callbacks.py:310] 2024-07-23 06:28:27,467 >> {'loss': 0.0017, 'learning_rate': 1.6041e-06, 'epoch': 3.11, 'throughput': 481.40}
-[INFO|callbacks.py:310] 2024-07-23 06:28:40,676 >> {'loss': 0.0068, 'learning_rate': 1.5635e-06, 'epoch': 3.14, 'throughput': 481.24}
-[INFO|callbacks.py:310] 2024-07-23 06:28:53,872 >> {'loss': 0.0022, 'learning_rate': 1.5232e-06, 'epoch': 3.16, 'throughput': 481.20}
-[INFO|callbacks.py:310] 2024-07-23 06:29:07,052 >> {'loss': 0.0162, 'learning_rate': 1.4832e-06, 'epoch': 3.19, 'throughput': 481.37}
-[INFO|callbacks.py:310] 2024-07-23 06:29:20,250 >> {'loss': 0.0014, 'learning_rate': 1.4435e-06, 'epoch': 3.22, 'throughput': 481.43}
-[INFO|callbacks.py:310] 2024-07-23 06:29:33,459 >> {'loss': 0.0063, 'learning_rate': 1.4041e-06, 'epoch': 3.24, 'throughput': 481.43}
-[INFO|callbacks.py:310] 2024-07-23 06:29:46,654 >> {'loss': 0.0282, 'learning_rate': 1.3650e-06, 'epoch': 3.27, 'throughput': 481.54}
-[INFO|callbacks.py:310] 2024-07-23 06:29:59,860 >> {'loss': 0.0003, 'learning_rate': 1.3263e-06, 'epoch': 3.29, 'throughput': 481.60}
-[INFO|callbacks.py:310] 2024-07-23 06:30:13,068 >> {'loss': 0.0002, 'learning_rate': 1.2880e-06, 'epoch': 3.32, 'throughput': 481.57}
-[INFO|callbacks.py:310] 2024-07-23 06:30:26,274 >> {'loss': 0.0004, 'learning_rate': 1.2500e-06, 'epoch': 3.34, 'throughput': 481.59}
-[INFO|callbacks.py:310] 2024-07-23 06:30:39,468 >> {'loss': 0.0169, 'learning_rate': 1.2124e-06, 'epoch': 3.37, 'throughput': 481.65}
-[INFO|callbacks.py:310] 2024-07-23 06:30:52,681 >> {'loss': 0.0127, 'learning_rate': 1.1752e-06, 'epoch': 3.40, 'throughput': 481.55}
-[INFO|callbacks.py:310] 2024-07-23 06:31:05,890 >> {'loss': 0.0045, 'learning_rate': 1.1384e-06, 'epoch': 3.42, 'throughput': 481.46}
-[INFO|callbacks.py:310] 2024-07-23 06:31:19,081 >> {'loss': 0.0924, 'learning_rate': 1.1020e-06, 'epoch': 3.45, 'throughput': 481.54}
-[INFO|callbacks.py:310] 2024-07-23 06:31:32,267 >> {'loss': 0.0067, 'learning_rate': 1.0661e-06, 'epoch': 3.47, 'throughput': 481.56}
-[INFO|callbacks.py:310] 2024-07-23 06:31:45,479 >> {'loss': 0.0030, 'learning_rate': 1.0305e-06, 'epoch': 3.50, 'throughput': 481.43}
-[INFO|callbacks.py:310] 2024-07-23 06:31:58,677 >> {'loss': 0.0164, 'learning_rate': 9.9546e-07, 'epoch': 3.52, 'throughput': 481.52}
-[INFO|callbacks.py:310] 2024-07-23 06:32:11,871 >> {'loss': 0.0018, 'learning_rate': 9.6085e-07, 'epoch': 3.55, 'throughput': 481.54}
-[INFO|callbacks.py:310] 2024-07-23 06:32:25,063 >> {'loss': 0.0226, 'learning_rate': 9.2670e-07, 'epoch': 3.58, 'throughput': 481.56}
-[INFO|callbacks.py:310] 2024-07-23 06:32:38,283 >> {'loss': 0.0008, 'learning_rate': 8.9303e-07, 'epoch': 3.60, 'throughput': 481.50}
-[INFO|callbacks.py:310] 2024-07-23 06:32:51,483 >> {'loss': 0.0004, 'learning_rate': 8.5985e-07, 'epoch': 3.63, 'throughput': 481.37}
-[INFO|callbacks.py:310] 2024-07-23 06:33:04,683 >> {'loss': 0.0008, 'learning_rate': 8.2717e-07, 'epoch': 3.65, 'throughput': 481.41}
-[INFO|callbacks.py:310] 2024-07-23 06:33:17,874 >> {'loss': 0.0256, 'learning_rate': 7.9500e-07, 'epoch': 3.68, 'throughput': 481.37}
-[INFO|callbacks.py:310] 2024-07-23 06:33:31,067 >> {'loss': 0.0005, 'learning_rate': 7.6335e-07, 'epoch': 3.70, 'throughput': 481.34}
-[INFO|callbacks.py:310] 2024-07-23 06:33:44,265 >> {'loss': 0.0045, 'learning_rate': 7.3223e-07, 'epoch': 3.73, 'throughput': 481.41}
-[INFO|callbacks.py:310] 2024-07-23 06:33:57,472 >> {'loss': 0.0005, 'learning_rate': 7.0165e-07, 'epoch': 3.76, 'throughput': 481.25}
-[INFO|callbacks.py:310] 2024-07-23 06:34:10,676 >> {'loss': 0.0069, 'learning_rate': 6.7162e-07, 'epoch': 3.78, 'throughput': 481.50}
-[INFO|callbacks.py:310] 2024-07-23 06:34:23,871 >> {'loss': 0.0150, 'learning_rate': 6.4214e-07, 'epoch': 3.81, 'throughput': 481.56}
-[INFO|callbacks.py:310] 2024-07-23 06:34:37,067 >> {'loss': 0.0012, 'learning_rate': 6.1323e-07, 'epoch': 3.83, 'throughput': 481.59}
-[INFO|callbacks.py:310] 2024-07-23 06:34:50,261 >> {'loss': 0.0095, 'learning_rate': 5.8489e-07, 'epoch': 3.86, 'throughput': 481.60}
-[INFO|callbacks.py:310] 2024-07-23 06:35:03,468 >> {'loss': 0.0271, 'learning_rate': 5.5714e-07, 'epoch': 3.88, 'throughput': 481.67}
-[INFO|callbacks.py:310] 2024-07-23 06:35:16,672 >> {'loss': 0.0201, 'learning_rate': 5.2997e-07, 'epoch': 3.91, 'throughput': 481.58}
-[INFO|callbacks.py:310] 2024-07-23 06:35:29,856 >> {'loss': 0.0120, 'learning_rate': 5.0341e-07, 'epoch': 3.94, 'throughput': 481.54}
-[INFO|callbacks.py:310] 2024-07-23 06:35:43,060 >> {'loss': 0.0230, 'learning_rate': 4.7746e-07, 'epoch': 3.96, 'throughput': 481.59}
-[INFO|callbacks.py:310] 2024-07-23 06:35:56,266 >> {'loss': 0.0156, 'learning_rate': 4.5212e-07, 'epoch': 3.99, 'throughput': 481.51}
-[INFO|callbacks.py:310] 2024-07-23 06:36:09,469 >> {'loss': 0.0009, 'learning_rate': 4.2741e-07, 'epoch': 4.01, 'throughput': 481.59}
-[INFO|callbacks.py:310] 2024-07-23 06:36:22,665 >> {'loss': 0.0017, 'learning_rate': 4.0332e-07, 'epoch': 4.04, 'throughput': 481.58}
-[INFO|callbacks.py:310] 2024-07-23 06:36:35,861 >> {'loss': 0.0015, 'learning_rate': 3.7988e-07, 'epoch': 4.06, 'throughput': 481.58}
-[INFO|callbacks.py:310] 2024-07-23 06:36:49,059 >> {'loss': 0.0035, 'learning_rate': 3.5708e-07, 'epoch': 4.09, 'throughput': 481.49}
-[INFO|callbacks.py:310] 2024-07-23 06:37:02,272 >> {'loss': 0.0016, 'learning_rate': 3.3494e-07, 'epoch': 4.12, 'throughput': 481.58}
-[INFO|callbacks.py:310] 2024-07-23 06:37:15,468 >> {'loss': 0.0028, 'learning_rate': 3.1345e-07, 'epoch': 4.14, 'throughput': 481.61}
-[INFO|callbacks.py:310] 2024-07-23 06:37:28,670 >> {'loss': 0.0006, 'learning_rate': 2.9263e-07, 'epoch': 4.17, 'throughput': 481.65}
-[INFO|callbacks.py:310] 2024-07-23 06:37:41,872 >> {'loss': 0.0013, 'learning_rate': 2.7248e-07, 'epoch': 4.19, 'throughput': 481.71}
-[INFO|callbacks.py:310] 2024-07-23 06:37:55,073 >> {'loss': 0.0006, 'learning_rate': 2.5301e-07, 'epoch': 4.22, 'throughput': 481.69}
-[INFO|callbacks.py:310] 2024-07-23 06:38:08,283 >> {'loss': 0.0017, 'learning_rate': 2.3423e-07, 'epoch': 4.24, 'throughput': 481.57}
-[INFO|callbacks.py:310] 2024-07-23 06:38:21,478 >> {'loss': 0.0004, 'learning_rate': 2.1614e-07, 'epoch': 4.27, 'throughput': 481.61}
-[INFO|callbacks.py:310] 2024-07-23 06:38:34,685 >> {'loss': 0.0049, 'learning_rate': 1.9874e-07, 'epoch': 4.30, 'throughput': 481.61}
-[INFO|callbacks.py:310] 2024-07-23 06:38:47,883 >> {'loss': 0.0071, 'learning_rate': 1.8204e-07, 'epoch': 4.32, 'throughput': 481.58}
-[INFO|callbacks.py:310] 2024-07-23 06:39:01,101 >> {'loss': 0.0011, 'learning_rate': 1.6605e-07, 'epoch': 4.35, 'throughput': 481.50}
-[INFO|callbacks.py:310] 2024-07-23 06:39:14,310 >> {'loss': 0.0004, 'learning_rate': 1.5077e-07, 'epoch': 4.37, 'throughput': 481.50}
-[INFO|callbacks.py:310] 2024-07-23 06:39:27,517 >> {'loss': 0.0007, 'learning_rate': 1.3620e-07, 'epoch': 4.40, 'throughput': 481.53}
-[INFO|callbacks.py:310] 2024-07-23 06:39:40,725 >> {'loss': 0.0017, 'learning_rate': 1.2236e-07, 'epoch': 4.42, 'throughput': 481.55}
-[INFO|callbacks.py:310] 2024-07-23 06:39:53,914 >> {'loss': 0.0007, 'learning_rate': 1.0924e-07, 'epoch': 4.45, 'throughput': 481.62}
-[INFO|callbacks.py:310] 2024-07-23 06:40:07,118 >> {'loss': 0.0003, 'learning_rate': 9.6846e-08, 'epoch': 4.48, 'throughput': 481.52}
-[INFO|callbacks.py:310] 2024-07-23 06:40:20,320 >> {'loss': 0.0046, 'learning_rate': 8.5185e-08, 'epoch': 4.50, 'throughput': 481.47}
-[INFO|callbacks.py:310] 2024-07-23 06:40:33,527 >> {'loss': 0.0038, 'learning_rate': 7.4261e-08, 'epoch': 4.53, 'throughput': 481.39}
-[INFO|callbacks.py:310] 2024-07-23 06:40:46,729 >> {'loss': 0.0036, 'learning_rate': 6.4075e-08, 'epoch': 4.55, 'throughput': 481.44}
-[INFO|callbacks.py:310] 2024-07-23 06:40:59,918 >> {'loss': 0.0056, 'learning_rate': 5.4631e-08, 'epoch': 4.58, 'throughput': 481.44}
-[INFO|callbacks.py:310] 2024-07-23 06:41:13,108 >> {'loss': 0.0057, 'learning_rate': 4.5932e-08, 'epoch': 4.60, 'throughput': 481.48}
-[INFO|callbacks.py:310] 2024-07-23 06:41:26,307 >> {'loss': 0.0020, 'learning_rate': 3.7981e-08, 'epoch': 4.63, 'throughput': 481.54}
-[INFO|callbacks.py:310] 2024-07-23 06:41:39,504 >> {'loss': 0.0003, 'learning_rate': 3.0779e-08, 'epoch': 4.66, 'throughput': 481.47}
-[INFO|callbacks.py:310] 2024-07-23 06:41:52,709 >> {'loss': 0.0002, 'learning_rate': 2.4330e-08, 'epoch': 4.68, 'throughput': 481.39}
-[INFO|
-[INFO|
-[INFO|
-[INFO|
-[
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-[INFO|
-{'task': {'name': 'Causal Language Modeling', 'type': 'text-generation'}}
+[INFO|attention.py:80] 2024-07-23 06:56:10,317 >> Using torch SDPA for faster training and inference.
+[INFO|loader.py:196] 2024-07-23 06:56:10,322 >> all params: 8,030,261,248
+[INFO|trainer.py:3788] 2024-07-23 06:56:10,436 >>
+***** Running Prediction *****
+[INFO|trainer.py:3790] 2024-07-23 06:56:10,436 >> Num examples = 1243
+[INFO|trainer.py:3793] 2024-07-23 06:56:10,436 >> Batch size = 2
+[WARNING|logging.py:328] 2024-07-23 06:56:11,121 >> We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/23/2024 06:56:11 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/23/2024 06:56:11 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/23/2024 06:56:11 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/23/2024 06:56:11 - INFO - llamafactory.model.loader - all params: 8,030,261,248
+07/23/2024 06:56:11 - INFO - llamafactory.model.loader - all params: 8,030,261,248
+07/23/2024 06:56:11 - INFO - llamafactory.model.loader - all params: 8,030,261,248
+07/23/2024 06:56:11 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/23/2024 06:56:11 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/23/2024 06:56:11 - INFO - llamafactory.model.loader - all params: 8,030,261,248
+07/23/2024 06:56:11 - INFO - llamafactory.model.loader - all params: 8,030,261,248
+07/23/2024 06:56:11 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/23/2024 06:56:11 - INFO - llamafactory.model.loader - all params: 8,030,261,248
+07/23/2024 06:56:11 - INFO - llamafactory.model.model_utils.attention - Using torch SDPA for faster training and inference.
+07/23/2024 06:56:11 - INFO - llamafactory.model.loader - all params: 8,030,261,248
+07/23/2024 06:56:12 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/23/2024 06:56:12 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/23/2024 06:56:12 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/23/2024 06:56:12 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/23/2024 06:56:12 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/23/2024 06:56:12 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+07/23/2024 06:56:12 - WARNING - transformers.models.llama.modeling_llama - We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)
+[INFO|trainer.py:127] 2024-07-23 06:56:19,786 >> Saving prediction results to saves/LLaMA3-8B-Chat/full/eval_2024-07-23-06-52-5_llama3/generated_predictions.jsonl
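The training header that this commit strips from running_log.txt is worth a consistency check before it disappears: the batch arithmetic ties the eight visible process ranks to the reported totals, and the new prediction run's timing agrees with predict_results.json. A worked sketch in plain Python (the drop of the last partial batch is inferred, not stated in the log):

```python
import math

# Cross-check the removed training header against the visible settings.
num_examples = 4968      # "Num examples = 4,968"
per_device_batch = 2     # "Instantaneous batch size per device = 2"
grad_accum = 8           # "Gradient Accumulation steps = 8"
num_gpus = 8             # process ranks 0-7 appear in the log
epochs = 5               # "Num Epochs = 5"

total_batch = per_device_batch * grad_accum * num_gpus
assert total_batch == 128                       # "Total train batch size ... = 128"
steps_per_epoch = num_examples // total_batch   # 38, assuming the last partial batch is dropped
assert steps_per_epoch * epochs == 190          # "Total optimization steps = 190"

# The prediction run that replaces it is consistent too: 1243 examples,
# batch size 2 on 8 ranks, finishing in 9.3909 s.
print(1243 / 9.3909)                        # ~132.36 samples/s, as reported
print(math.ceil(1243 / (2 * 8)) / 9.3909)   # ~8.31 steps/s, as reported
```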
trainer_log.jsonl
CHANGED
@@ -1,191 +1,15 @@
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps":
-{"current_steps": 16, "total_steps": 190, "loss": 1.1868, "learning_rate": 4.986304738420684e-06, "epoch": 0.4115755627009646, "percentage": 8.42, "elapsed_time": "0:03:33", "remaining_time": "0:38:44", "throughput": "474.17", "total_tokens": 101360}
-{"current_steps": 17, "total_steps": 190, "loss": 0.5418, "learning_rate": 4.981365379103306e-06, "epoch": 0.43729903536977494, "percentage": 8.95, "elapsed_time": "0:03:46", "remaining_time": "0:38:29", "throughput": "473.57", "total_tokens": 107488}
-{"current_steps": 18, "total_steps": 190, "loss": 0.2263, "learning_rate": 4.975670171853926e-06, "epoch": 0.4630225080385852, "percentage": 9.47, "elapsed_time": "0:04:00", "remaining_time": "0:38:14", "throughput": "474.35", "total_tokens": 113920}
-{"current_steps": 19, "total_steps": 190, "loss": 0.1612, "learning_rate": 4.9692208514878445e-06, "epoch": 0.4887459807073955, "percentage": 10.0, "elapsed_time": "0:04:13", "remaining_time": "0:38:00", "throughput": "475.02", "total_tokens": 120352}
-{"current_steps": 20, "total_steps": 190, "loss": 0.3299, "learning_rate": 4.962019382530521e-06, "epoch": 0.5144694533762058, "percentage": 10.53, "elapsed_time": "0:04:26", "remaining_time": "0:37:45", "throughput": "475.97", "total_tokens": 126880}
-{"current_steps": 21, "total_steps": 190, "loss": 0.2013, "learning_rate": 4.9540679586191605e-06, "epoch": 0.5401929260450161, "percentage": 11.05, "elapsed_time": "0:04:39", "remaining_time": "0:37:31", "throughput": "476.60", "total_tokens": 133344}
-{"current_steps": 22, "total_steps": 190, "loss": 0.2446, "learning_rate": 4.9453690018345144e-06, "epoch": 0.5659163987138264, "percentage": 11.58, "elapsed_time": "0:04:52", "remaining_time": "0:37:17", "throughput": "477.00", "total_tokens": 139744}
-{"current_steps": 23, "total_steps": 190, "loss": 0.2235, "learning_rate": 4.935925161963089e-06, "epoch": 0.5916398713826366, "percentage": 12.11, "elapsed_time": "0:05:06", "remaining_time": "0:37:03", "throughput": "477.33", "total_tokens": 146144}
-{"current_steps": 24, "total_steps": 190, "loss": 0.116, "learning_rate": 4.925739315689991e-06, "epoch": 0.617363344051447, "percentage": 12.63, "elapsed_time": "0:05:19", "remaining_time": "0:36:49", "throughput": "476.68", "total_tokens": 152240}
-{"current_steps": 25, "total_steps": 190, "loss": 0.2179, "learning_rate": 4.914814565722671e-06, "epoch": 0.6430868167202572, "percentage": 13.16, "elapsed_time": "0:05:32", "remaining_time": "0:36:34", "throughput": "476.63", "total_tokens": 158512}
-{"current_steps": 26, "total_steps": 190, "loss": 0.1414, "learning_rate": 4.903154239845798e-06, "epoch": 0.6688102893890675, "percentage": 13.68, "elapsed_time": "0:05:45", "remaining_time": "0:36:20", "throughput": "476.58", "total_tokens": 164784}
-{"current_steps": 27, "total_steps": 190, "loss": 0.1181, "learning_rate": 4.890761889907589e-06, "epoch": 0.6945337620578779, "percentage": 14.21, "elapsed_time": "0:05:58", "remaining_time": "0:36:07", "throughput": "477.26", "total_tokens": 171312}
-{"current_steps": 28, "total_steps": 190, "loss": 0.2753, "learning_rate": 4.8776412907378845e-06, "epoch": 0.7202572347266881, "percentage": 14.74, "elapsed_time": "0:06:12", "remaining_time": "0:35:53", "throughput": "477.91", "total_tokens": 177856}
-{"current_steps": 29, "total_steps": 190, "loss": 0.3255, "learning_rate": 4.863796438998293e-06, "epoch": 0.7459807073954984, "percentage": 15.26, "elapsed_time": "0:06:25", "remaining_time": "0:35:39", "throughput": "478.44", "total_tokens": 184368}
-{"current_steps": 30, "total_steps": 190, "loss": 0.2352, "learning_rate": 4.849231551964771e-06, "epoch": 0.7717041800643086, "percentage": 15.79, "elapsed_time": "0:06:38", "remaining_time": "0:35:25", "throughput": "479.06", "total_tokens": 190928}
-{"current_steps": 31, "total_steps": 190, "loss": 0.063, "learning_rate": 4.833951066243004e-06, "epoch": 0.797427652733119, "percentage": 16.32, "elapsed_time": "0:06:51", "remaining_time": "0:35:11", "throughput": "479.01", "total_tokens": 197232}
-{"current_steps": 32, "total_steps": 190, "loss": 0.2042, "learning_rate": 4.817959636416969e-06, "epoch": 0.8231511254019293, "percentage": 16.84, "elapsed_time": "0:07:04", "remaining_time": "0:34:58", "throughput": "479.09", "total_tokens": 203584}
-{"current_steps": 33, "total_steps": 190, "loss": 0.1364, "learning_rate": 4.801262133631101e-06, "epoch": 0.8488745980707395, "percentage": 17.37, "elapsed_time": "0:07:18", "remaining_time": "0:34:44", "throughput": "479.59", "total_tokens": 210128}
-{"current_steps": 34, "total_steps": 190, "loss": 0.0934, "learning_rate": 4.783863644106502e-06, "epoch": 0.8745980707395499, "percentage": 17.89, "elapsed_time": "0:07:31", "remaining_time": "0:34:30", "throughput": "479.71", "total_tokens": 216512}
-{"current_steps": 35, "total_steps": 190, "loss": 0.1332, "learning_rate": 4.765769467591626e-06, "epoch": 0.9003215434083601, "percentage": 18.42, "elapsed_time": "0:07:44", "remaining_time": "0:34:17", "throughput": "479.91", "total_tokens": 222928}
-{"current_steps": 36, "total_steps": 190, "loss": 0.1595, "learning_rate": 4.746985115747918e-06, "epoch": 0.9260450160771704, "percentage": 18.95, "elapsed_time": "0:07:57", "remaining_time": "0:34:03", "throughput": "479.86", "total_tokens": 229232}
-{"current_steps": 37, "total_steps": 190, "loss": 0.1528, "learning_rate": 4.72751631047092e-06, "epoch": 0.9517684887459807, "percentage": 19.47, "elapsed_time": "0:08:10", "remaining_time": "0:33:50", "throughput": "480.07", "total_tokens": 235680}
-{"current_steps": 38, "total_steps": 190, "loss": 0.1342, "learning_rate": 4.707368982147318e-06, "epoch": 0.977491961414791, "percentage": 20.0, "elapsed_time": "0:08:24", "remaining_time": "0:33:36", "throughput": "480.42", "total_tokens": 242192}
-{"current_steps": 39, "total_steps": 190, "loss": 0.1586, "learning_rate": 4.68654926784849e-06, "epoch": 1.0032154340836013, "percentage": 20.53, "elapsed_time": "0:08:37", "remaining_time": "0:33:22", "throughput": "480.69", "total_tokens": 248672}
-{"current_steps": 40, "total_steps": 190, "loss": 0.1072, "learning_rate": 4.665063509461098e-06, "epoch": 1.0289389067524115, "percentage": 21.05, "elapsed_time": "0:08:50", "remaining_time": "0:33:09", "throughput": "480.80", "total_tokens": 255072}
-{"current_steps": 41, "total_steps": 190, "loss": 0.0357, "learning_rate": 4.642918251755281e-06, "epoch": 1.0546623794212218, "percentage": 21.58, "elapsed_time": "0:09:03", "remaining_time": "0:32:55", "throughput": "481.10", "total_tokens": 261584}
-{"current_steps": 42, "total_steps": 190, "loss": 0.06, "learning_rate": 4.620120240391065e-06, "epoch": 1.0803858520900322, "percentage": 22.11, "elapsed_time": "0:09:16", "remaining_time": "0:32:42", "throughput": "481.28", "total_tokens": 268032}
-{"current_steps": 43, "total_steps": 190, "loss": 0.0902, "learning_rate": 4.596676419863561e-06, "epoch": 1.1061093247588425, "percentage": 22.63, "elapsed_time": "0:09:30", "remaining_time": "0:32:28", "throughput": "481.59", "total_tokens": 274560}
-{"current_steps": 44, "total_steps": 190, "loss": 0.0202, "learning_rate": 4.572593931387604e-06, "epoch": 1.1318327974276527, "percentage": 23.16, "elapsed_time": "0:09:43", "remaining_time": "0:32:15", "throughput": "481.67", "total_tokens": 280960}
-{"current_steps": 45, "total_steps": 190, "loss": 0.038, "learning_rate": 4.54788011072248e-06, "epoch": 1.157556270096463, "percentage": 23.68, "elapsed_time": "0:09:56", "remaining_time": "0:32:02", "throughput": "481.31", "total_tokens": 287104}
-{"current_steps": 46, "total_steps": 190, "loss": 0.0379, "learning_rate": 4.522542485937369e-06, "epoch": 1.1832797427652733, "percentage": 24.21, "elapsed_time": "0:10:09", "remaining_time": "0:31:48", "throughput": "481.42", "total_tokens": 293520}
-{"current_steps": 47, "total_steps": 190, "loss": 0.0742, "learning_rate": 4.496588775118232e-06, "epoch": 1.2090032154340835, "percentage": 24.74, "elapsed_time": "0:10:22", "remaining_time": "0:31:35", "throughput": "481.51", "total_tokens": 299936}
-{"current_steps": 48, "total_steps": 190, "loss": 0.0658, "learning_rate": 4.470026884016805e-06, "epoch": 1.234726688102894, "percentage": 25.26, "elapsed_time": "0:10:36", "remaining_time": "0:31:21", "throughput": "480.94", "total_tokens": 305936}
-{"current_steps": 49, "total_steps": 190, "loss": 0.0336, "learning_rate": 4.442864903642428e-06, "epoch": 1.2604501607717042, "percentage": 25.79, "elapsed_time": "0:10:49", "remaining_time": "0:31:08", "throughput": "481.32", "total_tokens": 312528}
-{"current_steps": 50, "total_steps": 190, "loss": 0.1021, "learning_rate": 4.415111107797445e-06, "epoch": 1.2861736334405145, "percentage": 26.32, "elapsed_time": "0:11:02", "remaining_time": "0:30:55", "throughput": "481.12", "total_tokens": 318752}
-{"current_steps": 51, "total_steps": 190, "loss": 0.1312, "learning_rate": 4.386773950556931e-06, "epoch": 1.3118971061093248, "percentage": 26.84, "elapsed_time": "0:11:15", "remaining_time": "0:30:41", "throughput": "481.10", "total_tokens": 325088}
-{"current_steps": 52, "total_steps": 190, "loss": 0.0665, "learning_rate": 4.357862063693486e-06, "epoch": 1.337620578778135, "percentage": 27.37, "elapsed_time": "0:11:28", "remaining_time": "0:30:28", "throughput": "481.08", "total_tokens": 331424}
-{"current_steps": 53, "total_steps": 190, "loss": 0.0679, "learning_rate": 4.328384254047927e-06, "epoch": 1.3633440514469453, "percentage": 27.89, "elapsed_time": "0:11:42", "remaining_time": "0:30:14", "throughput": "481.08", "total_tokens": 337776}
-{"current_steps": 54, "total_steps": 190, "loss": 0.0579, "learning_rate": 4.2983495008466285e-06, "epoch": 1.3890675241157555, "percentage": 28.42, "elapsed_time": "0:11:55", "remaining_time": "0:30:01", "throughput": "480.90", "total_tokens": 344000}
-{"current_steps": 55, "total_steps": 190, "loss": 0.0542, "learning_rate": 4.267766952966369e-06, "epoch": 1.414790996784566, "percentage": 28.95, "elapsed_time": "0:12:08", "remaining_time": "0:29:48", "throughput": "481.15", "total_tokens": 350528}
-{"current_steps": 56, "total_steps": 190, "loss": 0.0476, "learning_rate": 4.236645926147493e-06, "epoch": 1.4405144694533762, "percentage": 29.47, "elapsed_time": "0:12:21", "remaining_time": "0:29:34", "throughput": "481.17", "total_tokens": 356896}
-{"current_steps": 57, "total_steps": 190, "loss": 0.0613, "learning_rate": 4.204995900156247e-06, "epoch": 1.4662379421221865, "percentage": 30.0, "elapsed_time": "0:12:34", "remaining_time": "0:29:21", "throughput": "481.35", "total_tokens": 363376}
-{"current_steps": 58, "total_steps": 190, "loss": 0.0995, "learning_rate": 4.172826515897146e-06, "epoch": 1.4919614147909968, "percentage": 30.53, "elapsed_time": "0:12:48", "remaining_time": "0:29:08", "throughput": "481.29", "total_tokens": 369680}
-{"current_steps": 59, "total_steps": 190, "loss": 0.0532, "learning_rate": 4.140147572476269e-06, "epoch": 1.517684887459807, "percentage": 31.05, "elapsed_time": "0:13:01", "remaining_time": "0:28:54", "throughput": "481.12", "total_tokens": 375904}
-{"current_steps": 60, "total_steps": 190, "loss": 0.0824, "learning_rate": 4.106969024216348e-06, "epoch": 1.5434083601286175, "percentage": 31.58, "elapsed_time": "0:13:14", "remaining_time": "0:28:41", "throughput": "481.19", "total_tokens": 382304}
-{"current_steps": 61, "total_steps": 190, "loss": 0.0499, "learning_rate": 4.073300977624594e-06, "epoch": 1.5691318327974275, "percentage": 32.11, "elapsed_time": "0:13:27", "remaining_time": "0:28:28", "throughput": "481.24", "total_tokens": 388688}
-{"current_steps": 62, "total_steps": 190, "loss": 0.0413, "learning_rate": 4.039153688314146e-06, "epoch": 1.594855305466238, "percentage": 32.63, "elapsed_time": "0:13:40", "remaining_time": "0:28:14", "throughput": "481.38", "total_tokens": 395152}
-{"current_steps": 63, "total_steps": 190, "loss": 0.0637, "learning_rate": 4.0045375578801216e-06, "epoch": 1.6205787781350482, "percentage": 33.16, "elapsed_time": "0:13:54", "remaining_time": "0:28:01", "throughput": "481.64", "total_tokens": 401728}
-{"current_steps": 64, "total_steps": 190, "loss": 0.0529, "learning_rate": 3.969463130731183e-06, "epoch": 1.6463022508038585, "percentage": 33.68, "elapsed_time": "0:14:07", "remaining_time": "0:27:48", "throughput": "481.43", "total_tokens": 407904}
-{"current_steps": 65, "total_steps": 190, "loss": 0.0474, "learning_rate": 3.933941090877615e-06, "epoch": 1.6720257234726688, "percentage": 34.21, "elapsed_time": "0:14:20", "remaining_time": "0:27:34", "throughput": "481.41", "total_tokens": 414240}
-{"current_steps": 66, "total_steps": 190, "loss": 0.0649, "learning_rate": 3.897982258676867e-06, "epoch": 1.697749196141479, "percentage": 34.74, "elapsed_time": "0:14:33", "remaining_time": "0:27:21", "throughput": "481.25", "total_tokens": 420448}
-{"current_steps": 67, "total_steps": 190, "loss": 0.0505, "learning_rate": 3.861597587537568e-06, "epoch": 1.7234726688102895, "percentage": 35.26, "elapsed_time": "0:14:46", "remaining_time": "0:27:08", "throughput": "481.23", "total_tokens": 426784}
-{"current_steps": 68, "total_steps": 190, "loss": 0.0621, "learning_rate": 3.824798160583012e-06, "epoch": 1.7491961414790995, "percentage": 35.79, "elapsed_time": "0:15:00", "remaining_time": "0:26:54", "throughput": "480.87", "total_tokens": 432816}
-{"current_steps": 69, "total_steps": 190, "loss": 0.0769, "learning_rate": 3.787595187275136e-06, "epoch": 1.77491961414791, "percentage": 36.32, "elapsed_time": "0:15:13", "remaining_time": "0:26:41", "throughput": "480.95", "total_tokens": 439232}
-{"current_steps": 70, "total_steps": 190, "loss": 0.0435, "learning_rate": 3.7500000000000005e-06, "epoch": 1.8006430868167203, "percentage": 36.84, "elapsed_time": "0:15:26", "remaining_time": "0:26:28", "throughput": "481.18", "total_tokens": 445792}
-{"current_steps": 71, "total_steps": 190, "loss": 0.0673, "learning_rate": 3.7120240506158433e-06, "epoch": 1.8263665594855305, "percentage": 37.37, "elapsed_time": "0:15:39", "remaining_time": "0:26:14", "throughput": "481.46", "total_tokens": 452400}
-{"current_steps": 72, "total_steps": 190, "loss": 0.1316, "learning_rate": 3.6736789069647273e-06, "epoch": 1.852090032154341, "percentage": 37.89, "elapsed_time": "0:15:52", "remaining_time": "0:26:01", "throughput": "481.22", "total_tokens": 458528}
-{"current_steps": 73, "total_steps": 190, "loss": 0.0531, "learning_rate": 3.634976249348867e-06, "epoch": 1.877813504823151, "percentage": 38.42, "elapsed_time": "0:16:06", "remaining_time": "0:25:48", "throughput": "481.32", "total_tokens": 464976}
-{"current_steps": 74, "total_steps": 190, "loss": 0.0287, "learning_rate": 3.595927866972694e-06, "epoch": 1.9035369774919615, "percentage": 38.95, "elapsed_time": "0:16:19", "remaining_time": "0:25:35", "throughput": "481.43", "total_tokens": 471440}
-{"current_steps": 75, "total_steps": 190, "loss": 0.0648, "learning_rate": 3.556545654351749e-06, "epoch": 1.9292604501607717, "percentage": 39.47, "elapsed_time": "0:16:32", "remaining_time": "0:25:21", "throughput": "481.41", "total_tokens": 477776}
-{"current_steps": 76, "total_steps": 190, "loss": 0.1211, "learning_rate": 3.516841607689501e-06, "epoch": 1.954983922829582, "percentage": 40.0, "elapsed_time": "0:16:45", "remaining_time": "0:25:08", "throughput": "481.37", "total_tokens": 484096}
-{"current_steps": 77, "total_steps": 190, "loss": 0.0879, "learning_rate": 3.476827821223184e-06, "epoch": 1.9807073954983923, "percentage": 40.53, "elapsed_time": "0:16:58", "remaining_time": "0:24:55", "throughput": "481.10", "total_tokens": 490176}
-{"current_steps": 78, "total_steps": 190, "loss": 0.0227, "learning_rate": 3.436516483539781e-06, "epoch": 2.0064308681672025, "percentage": 41.05, "elapsed_time": "0:17:12", "remaining_time": "0:24:41", "throughput": "481.24", "total_tokens": 496672}
-{"current_steps": 79, "total_steps": 190, "loss": 0.0228, "learning_rate": 3.39591987386325e-06, "epoch": 2.032154340836013, "percentage": 41.58, "elapsed_time": "0:17:25", "remaining_time": "0:24:28", "throughput": "481.30", "total_tokens": 503088}
-{"current_steps": 80, "total_steps": 190, "loss": 0.036, "learning_rate": 3.3550503583141726e-06, "epoch": 2.057877813504823, "percentage": 42.11, "elapsed_time": "0:17:38", "remaining_time": "0:24:15", "throughput": "481.33", "total_tokens": 509472}
-{"current_steps": 81, "total_steps": 190, "loss": 0.0138, "learning_rate": 3.313920386142892e-06, "epoch": 2.0836012861736335, "percentage": 42.63, "elapsed_time": "0:17:51", "remaining_time": "0:24:02", "throughput": "481.23", "total_tokens": 515728}
-{"current_steps": 82, "total_steps": 190, "loss": 0.0697, "learning_rate": 3.272542485937369e-06, "epoch": 2.1093247588424435, "percentage": 43.16, "elapsed_time": "0:18:04", "remaining_time": "0:23:48", "throughput": "481.06", "total_tokens": 521904}
-{"current_steps": 83, "total_steps": 190, "loss": 0.0508, "learning_rate": 3.230929261806842e-06, "epoch": 2.135048231511254, "percentage": 43.68, "elapsed_time": "0:18:18", "remaining_time": "0:23:35", "throughput": "480.79", "total_tokens": 527952}
-{"current_steps": 84, "total_steps": 190, "loss": 0.0088, "learning_rate": 3.189093389542498e-06, "epoch": 2.1607717041800645, "percentage": 44.21, "elapsed_time": "0:18:31", "remaining_time": "0:23:22", "throughput": "481.08", "total_tokens": 534624}
-{"current_steps": 85, "total_steps": 190, "loss": 0.0158, "learning_rate": 3.147047612756302e-06, "epoch": 2.1864951768488745, "percentage": 44.74, "elapsed_time": "0:18:44", "remaining_time": "0:23:09", "throughput": "481.26", "total_tokens": 541168}
-{"current_steps": 86, "total_steps": 190, "loss": 0.006, "learning_rate": 3.1048047389991693e-06, "epoch": 2.212218649517685, "percentage": 45.26, "elapsed_time": "0:18:57", "remaining_time": "0:22:55", "throughput": "481.23", "total_tokens": 547488}
-{"current_steps": 87, "total_steps": 190, "loss": 0.038, "learning_rate": 3.062377635859663e-06, "epoch": 2.237942122186495, "percentage": 45.79, "elapsed_time": "0:19:10", "remaining_time": "0:22:42", "throughput": "481.60", "total_tokens": 554272}
-{"current_steps": 88, "total_steps": 190, "loss": 0.0004, "learning_rate": 3.019779227044398e-06, "epoch": 2.2636655948553055, "percentage": 46.32, "elapsed_time": "0:19:24", "remaining_time": "0:22:29", "throughput": "481.51", "total_tokens": 560528}
-{"current_steps": 89, "total_steps": 190, "loss": 0.0111, "learning_rate": 2.9770224884413625e-06, "epoch": 2.289389067524116, "percentage": 46.84, "elapsed_time": "0:19:37", "remaining_time": "0:22:16", "throughput": "481.43", "total_tokens": 566784}
-{"current_steps": 90, "total_steps": 190, "loss": 0.0008, "learning_rate": 2.9341204441673267e-06, "epoch": 2.315112540192926, "percentage": 47.37, "elapsed_time": "0:19:50", "remaining_time": "0:22:02", "throughput": "481.60", "total_tokens": 573344}
-{"current_steps": 91, "total_steps": 190, "loss": 0.0182, "learning_rate": 2.8910861626005774e-06, "epoch": 2.3408360128617365, "percentage": 47.89, "elapsed_time": "0:20:03", "remaining_time": "0:21:49", "throughput": "481.67", "total_tokens": 579776}
-{"current_steps": 92, "total_steps": 190, "loss": 0.0491, "learning_rate": 2.847932752400164e-06, "epoch": 2.3665594855305465, "percentage": 48.42, "elapsed_time": "0:20:16", "remaining_time": "0:21:36", "throughput": "481.64", "total_tokens": 586096}
-{"current_steps": 93, "total_steps": 190, "loss": 0.004, "learning_rate": 2.804673358512869e-06, "epoch": 2.392282958199357, "percentage": 48.95, "elapsed_time": "0:20:30", "remaining_time": "0:21:22", "throughput": "481.71", "total_tokens": 592528}
-{"current_steps": 94, "total_steps": 190, "loss": 0.0176, "learning_rate": 2.761321158169134e-06, "epoch": 2.418006430868167, "percentage": 49.47, "elapsed_time": "0:20:43", "remaining_time": "0:21:09", "throughput": "481.77", "total_tokens": 598960}
-{"current_steps": 95, "total_steps": 190, "loss": 0.019, "learning_rate": 2.717889356869146e-06, "epoch": 2.4437299035369775, "percentage": 50.0, "elapsed_time": "0:20:56", "remaining_time": "0:20:56", "throughput": "481.70", "total_tokens": 605232}
-{"current_steps": 96, "total_steps": 190, "loss": 0.027, "learning_rate": 2.6743911843603134e-06, "epoch": 2.469453376205788, "percentage": 50.53, "elapsed_time": "0:21:09", "remaining_time": "0:20:43", "throughput": "481.50", "total_tokens": 611344}
-{"current_steps": 97, "total_steps": 190, "loss": 0.0354, "learning_rate": 2.6308398906073603e-06, "epoch": 2.495176848874598, "percentage": 51.05, "elapsed_time": "0:21:22", "remaining_time": "0:20:29", "throughput": "481.51", "total_tokens": 617712}
-{"current_steps": 98, "total_steps": 190, "loss": 0.0741, "learning_rate": 2.587248741756253e-06, "epoch": 2.5209003215434085, "percentage": 51.58, "elapsed_time": "0:21:36", "remaining_time": "0:20:16", "throughput": "481.62", "total_tokens": 624208}
-{"current_steps": 99, "total_steps": 190, "loss": 0.0582, "learning_rate": 2.543631016093209e-06, "epoch": 2.5466237942122185, "percentage": 52.11, "elapsed_time": "0:21:49", "remaining_time": "0:20:03", "throughput": "481.56", "total_tokens": 630496}
-{"current_steps": 100, "total_steps": 190, "loss": 0.0096, "learning_rate": 2.5e-06, "epoch": 2.572347266881029, "percentage": 52.63, "elapsed_time": "0:22:02", "remaining_time": "0:19:50", "throughput": "481.71", "total_tokens": 637040}
-{"current_steps": 101, "total_steps": 190, "loss": 0.0263, "learning_rate": 2.4563689839067913e-06, "epoch": 2.598070739549839, "percentage": 53.16, "elapsed_time": "0:22:15", "remaining_time": "0:19:36", "throughput": "481.77", "total_tokens": 643472}
-{"current_steps": 102, "total_steps": 190, "loss": 0.0121, "learning_rate": 2.4127512582437486e-06, "epoch": 2.6237942122186495, "percentage": 53.68, "elapsed_time": "0:22:28", "remaining_time": "0:19:23", "throughput": "481.72", "total_tokens": 649760}
-{"current_steps": 103, "total_steps": 190, "loss": 0.0204, "learning_rate": 2.3691601093926406e-06, "epoch": 2.64951768488746, "percentage": 54.21, "elapsed_time": "0:22:42", "remaining_time": "0:19:10", "throughput": "481.70", "total_tokens": 656096}
-{"current_steps": 104, "total_steps": 190, "loss": 0.0325, "learning_rate": 2.325608815639687e-06, "epoch": 2.67524115755627, "percentage": 54.74, "elapsed_time": "0:22:55", "remaining_time": "0:18:57", "throughput": "481.83", "total_tokens": 662624}
-{"current_steps": 105, "total_steps": 190, "loss": 0.0076, "learning_rate": 2.2821106431308546e-06, "epoch": 2.7009646302250805, "percentage": 55.26, "elapsed_time": "0:23:08", "remaining_time": "0:18:43", "throughput": "481.67", "total_tokens": 668768}
-{"current_steps": 106, "total_steps": 190, "loss": 0.0485, "learning_rate": 2.238678841830867e-06, "epoch": 2.7266881028938905, "percentage": 55.79, "elapsed_time": "0:23:21", "remaining_time": "0:18:30", "throughput": "481.57", "total_tokens": 674992}
-{"current_steps": 107, "total_steps": 190, "loss": 0.007, "learning_rate": 2.195326641487132e-06, "epoch": 2.752411575562701, "percentage": 56.32, "elapsed_time": "0:23:34", "remaining_time": "0:18:17", "throughput": "481.41", "total_tokens": 681120}
-{"current_steps": 108, "total_steps": 190, "loss": 0.0347, "learning_rate": 2.1520672475998374e-06, "epoch": 2.778135048231511, "percentage": 56.84, "elapsed_time": "0:23:48", "remaining_time": "0:18:04", "throughput": "481.34", "total_tokens": 687376}
-{"current_steps": 109, "total_steps": 190, "loss": 0.0142, "learning_rate": 2.1089138373994226e-06, "epoch": 2.8038585209003215, "percentage": 57.37, "elapsed_time": "0:24:01", "remaining_time": "0:17:51", "throughput": "481.52", "total_tokens": 693984}
-{"current_steps": 110, "total_steps": 190, "loss": 0.0414, "learning_rate": 2.0658795558326745e-06, "epoch": 2.829581993569132, "percentage": 57.89, "elapsed_time": "0:24:14", "remaining_time": "0:17:37", "throughput": "481.52", "total_tokens": 700352}
-{"current_steps": 111, "total_steps": 190, "loss": 0.0419, "learning_rate": 2.022977511558638e-06, "epoch": 2.855305466237942, "percentage": 58.42, "elapsed_time": "0:24:27", "remaining_time": "0:17:24", "throughput": "481.56", "total_tokens": 706768}
-{"current_steps": 112, "total_steps": 190, "loss": 0.043, "learning_rate": 1.9802207729556023e-06, "epoch": 2.8810289389067525, "percentage": 58.95, "elapsed_time": "0:24:40", "remaining_time": "0:17:11", "throughput": "481.65", "total_tokens": 713248}
-{"current_steps": 113, "total_steps": 190, "loss": 0.0192, "learning_rate": 1.937622364140338e-06, "epoch": 2.906752411575563, "percentage": 59.47, "elapsed_time": "0:24:54", "remaining_time": "0:16:58", "throughput": "481.62", "total_tokens": 719568}
-{"current_steps": 114, "total_steps": 190, "loss": 0.0427, "learning_rate": 1.895195261000831e-06, "epoch": 2.932475884244373, "percentage": 60.0, "elapsed_time": "0:25:07", "remaining_time": "0:16:44", "throughput": "481.66", "total_tokens": 725984}
-{"current_steps": 115, "total_steps": 190, "loss": 0.0116, "learning_rate": 1.852952387243698e-06, "epoch": 2.958199356913183, "percentage": 60.53, "elapsed_time": "0:25:20", "remaining_time": "0:16:31", "throughput": "481.58", "total_tokens": 732224}
-{"current_steps": 116, "total_steps": 190, "loss": 0.0135, "learning_rate": 1.8109066104575023e-06, "epoch": 2.9839228295819935, "percentage": 61.05, "elapsed_time": "0:25:33", "remaining_time": "0:16:18", "throughput": "481.53", "total_tokens": 738496}
-{"current_steps": 117, "total_steps": 190, "loss": 0.0128, "learning_rate": 1.7690707381931585e-06, "epoch": 3.009646302250804, "percentage": 61.58, "elapsed_time": "0:25:46", "remaining_time": "0:16:05", "throughput": "481.54", "total_tokens": 744880}
-{"current_steps": 118, "total_steps": 190, "loss": 0.0021, "learning_rate": 1.7274575140626318e-06, "epoch": 3.035369774919614, "percentage": 62.11, "elapsed_time": "0:26:00", "remaining_time": "0:15:51", "throughput": "481.65", "total_tokens": 751408}
-{"current_steps": 119, "total_steps": 190, "loss": 0.0057, "learning_rate": 1.686079613857109e-06, "epoch": 3.0610932475884245, "percentage": 62.63, "elapsed_time": "0:26:13", "remaining_time": "0:15:38", "throughput": "481.57", "total_tokens": 757632}
-{"current_steps": 120, "total_steps": 190, "loss": 0.0197, "learning_rate": 1.6449496416858285e-06, "epoch": 3.0868167202572345, "percentage": 63.16, "elapsed_time": "0:26:26", "remaining_time": "0:15:25", "throughput": "481.54", "total_tokens": 763936}
-{"current_steps": 121, "total_steps": 190, "loss": 0.0017, "learning_rate": 1.6040801261367494e-06, "epoch": 3.112540192926045, "percentage": 63.68, "elapsed_time": "0:26:39", "remaining_time": "0:15:12", "throughput": "481.40", "total_tokens": 770064}
-{"current_steps": 122, "total_steps": 190, "loss": 0.0068, "learning_rate": 1.56348351646022e-06, "epoch": 3.1382636655948555, "percentage": 64.21, "elapsed_time": "0:26:52", "remaining_time": "0:14:58", "throughput": "481.24", "total_tokens": 776176}
-{"current_steps": 123, "total_steps": 190, "loss": 0.0022, "learning_rate": 1.5231721787768162e-06, "epoch": 3.1639871382636655, "percentage": 64.74, "elapsed_time": "0:27:06", "remaining_time": "0:14:45", "throughput": "481.20", "total_tokens": 782464}
-{"current_steps": 124, "total_steps": 190, "loss": 0.0162, "learning_rate": 1.4831583923105e-06, "epoch": 3.189710610932476, "percentage": 65.26, "elapsed_time": "0:27:19", "remaining_time": "0:14:32", "throughput": "481.37", "total_tokens": 789072}
-{"current_steps": 125, "total_steps": 190, "loss": 0.0014, "learning_rate": 1.443454345648252e-06, "epoch": 3.215434083601286, "percentage": 65.79, "elapsed_time": "0:27:32", "remaining_time": "0:14:19", "throughput": "481.43", "total_tokens": 795536}
-{"current_steps": 126, "total_steps": 190, "loss": 0.0063, "learning_rate": 1.4040721330273063e-06, "epoch": 3.2411575562700965, "percentage": 66.32, "elapsed_time": "0:27:45", "remaining_time": "0:14:06", "throughput": "481.43", "total_tokens": 801888}
-{"current_steps": 127, "total_steps": 190, "loss": 0.0282, "learning_rate": 1.3650237506511333e-06, "epoch": 3.266881028938907, "percentage": 66.84, "elapsed_time": "0:27:58", "remaining_time": "0:13:52", "throughput": "481.54", "total_tokens": 808432}
-{"current_steps": 128, "total_steps": 190, "loss": 0.0003, "learning_rate": 1.3263210930352737e-06, "epoch": 3.292604501607717, "percentage": 67.37, "elapsed_time": "0:28:12", "remaining_time": "0:13:39", "throughput": "481.60", "total_tokens": 814896}
-{"current_steps": 129, "total_steps": 190, "loss": 0.0002, "learning_rate": 1.2879759493841577e-06, "epoch": 3.3183279742765275, "percentage": 67.89, "elapsed_time": "0:28:25", "remaining_time": "0:13:26", "throughput": "481.57", "total_tokens": 821200}
-{"current_steps": 130, "total_steps": 190, "loss": 0.0004, "learning_rate": 1.2500000000000007e-06, "epoch": 3.3440514469453375, "percentage": 68.42, "elapsed_time": "0:28:38", "remaining_time": "0:13:13", "throughput": "481.59", "total_tokens": 827584}
-{"current_steps": 131, "total_steps": 190, "loss": 0.0169, "learning_rate": 1.2124048127248644e-06, "epoch": 3.369774919614148, "percentage": 68.95, "elapsed_time": "0:28:51", "remaining_time": "0:12:59", "throughput": "481.65", "total_tokens": 834048}
-{"current_steps": 132, "total_steps": 190, "loss": 0.0127, "learning_rate": 1.1752018394169882e-06, "epoch": 3.395498392282958, "percentage": 69.47, "elapsed_time": "0:29:04", "remaining_time": "0:12:46", "throughput": "481.55", "total_tokens": 840240}
-{"current_steps": 133, "total_steps": 190, "loss": 0.0045, "learning_rate": 1.1384024124624324e-06, "epoch": 3.4212218649517685, "percentage": 70.0, "elapsed_time": "0:29:18", "remaining_time": "0:12:33", "throughput": "481.46", "total_tokens": 846448}
-{"current_steps": 134, "total_steps": 190, "loss": 0.0924, "learning_rate": 1.1020177413231334e-06, "epoch": 3.446945337620579, "percentage": 70.53, "elapsed_time": "0:29:31", "remaining_time": "0:12:20", "throughput": "481.54", "total_tokens": 852928}
-{"current_steps": 135, "total_steps": 190, "loss": 0.0067, "learning_rate": 1.0660589091223854e-06, "epoch": 3.472668810289389, "percentage": 71.05, "elapsed_time": "0:29:44", "remaining_time": "0:12:06", "throughput": "481.56", "total_tokens": 859312}
-{"current_steps": 136, "total_steps": 190, "loss": 0.003, "learning_rate": 1.0305368692688175e-06, "epoch": 3.4983922829581995, "percentage": 71.58, "elapsed_time": "0:29:57", "remaining_time": "0:11:53", "throughput": "481.43", "total_tokens": 865440}
-{"current_steps": 137, "total_steps": 190, "loss": 0.0164, "learning_rate": 9.95462442119879e-07, "epoch": 3.5241157556270095, "percentage": 72.11, "elapsed_time": "0:30:10", "remaining_time": "0:11:40", "throughput": "481.52", "total_tokens": 871968}
-{"current_steps": 138, "total_steps": 190, "loss": 0.0018, "learning_rate": 9.608463116858544e-07, "epoch": 3.54983922829582, "percentage": 72.63, "elapsed_time": "0:30:24", "remaining_time": "0:11:27", "throughput": "481.54", "total_tokens": 878352}
-{"current_steps": 139, "total_steps": 190, "loss": 0.0226, "learning_rate": 9.266990223754069e-07, "epoch": 3.57556270096463, "percentage": 73.16, "elapsed_time": "0:30:37", "remaining_time": "0:11:14", "throughput": "481.56", "total_tokens": 884736}
-{"current_steps": 140, "total_steps": 190, "loss": 0.0008, "learning_rate": 8.930309757836517e-07, "epoch": 3.6012861736334405, "percentage": 73.68, "elapsed_time": "0:30:50", "remaining_time": "0:11:00", "throughput": "481.50", "total_tokens": 891008}
-{"current_steps": 141, "total_steps": 190, "loss": 0.0004, "learning_rate": 8.598524275237321e-07, "epoch": 3.627009646302251, "percentage": 74.21, "elapsed_time": "0:31:03", "remaining_time": "0:10:47", "throughput": "481.37", "total_tokens": 897120}
-{"current_steps": 142, "total_steps": 190, "loss": 0.0008, "learning_rate": 8.271734841028553e-07, "epoch": 3.652733118971061, "percentage": 74.74, "elapsed_time": "0:31:16", "remaining_time": "0:10:34", "throughput": "481.41", "total_tokens": 903536}
-{"current_steps": 143, "total_steps": 190, "loss": 0.0256, "learning_rate": 7.950040998437541e-07, "epoch": 3.6784565916398715, "percentage": 75.26, "elapsed_time": "0:31:30", "remaining_time": "0:10:21", "throughput": "481.37", "total_tokens": 909824}
-{"current_steps": 144, "total_steps": 190, "loss": 0.0005, "learning_rate": 7.633540738525066e-07, "epoch": 3.7041800643086815, "percentage": 75.79, "elapsed_time": "0:31:43", "remaining_time": "0:10:07", "throughput": "481.34", "total_tokens": 916112}
-{"current_steps": 145, "total_steps": 190, "loss": 0.0045, "learning_rate": 7.322330470336314e-07, "epoch": 3.729903536977492, "percentage": 76.32, "elapsed_time": "0:31:56", "remaining_time": "0:09:54", "throughput": "481.41", "total_tokens": 922592}
-{"current_steps": 146, "total_steps": 190, "loss": 0.0005, "learning_rate": 7.016504991533727e-07, "epoch": 3.755627009646302, "percentage": 76.84, "elapsed_time": "0:32:09", "remaining_time": "0:09:41", "throughput": "481.25", "total_tokens": 928640}
-{"current_steps": 147, "total_steps": 190, "loss": 0.0069, "learning_rate": 6.716157459520739e-07, "epoch": 3.7813504823151125, "percentage": 77.37, "elapsed_time": "0:32:22", "remaining_time": "0:09:28", "throughput": "481.50", "total_tokens": 935488}
-{"current_steps": 148, "total_steps": 190, "loss": 0.015, "learning_rate": 6.421379363065142e-07, "epoch": 3.807073954983923, "percentage": 77.89, "elapsed_time": "0:32:36", "remaining_time": "0:09:15", "throughput": "481.56", "total_tokens": 941952}
-{"current_steps": 149, "total_steps": 190, "loss": 0.0012, "learning_rate": 6.1322604944307e-07, "epoch": 3.832797427652733, "percentage": 78.42, "elapsed_time": "0:32:49", "remaining_time": "0:09:01", "throughput": "481.59", "total_tokens": 948368}
-{"current_steps": 150, "total_steps": 190, "loss": 0.0095, "learning_rate": 5.848888922025553e-07, "epoch": 3.8585209003215435, "percentage": 78.95, "elapsed_time": "0:33:02", "remaining_time": "0:08:48", "throughput": "481.60", "total_tokens": 954752}
-{"current_steps": 151, "total_steps": 190, "loss": 0.0271, "learning_rate": 5.571350963575728e-07, "epoch": 3.884244372990354, "percentage": 79.47, "elapsed_time": "0:33:15", "remaining_time": "0:08:35", "throughput": "481.67", "total_tokens": 961248}
-{"current_steps": 152, "total_steps": 190, "loss": 0.0201, "learning_rate": 5.299731159831953e-07, "epoch": 3.909967845659164, "percentage": 80.0, "elapsed_time": "0:33:28", "remaining_time": "0:08:22", "throughput": "481.58", "total_tokens": 967424}
-{"current_steps": 153, "total_steps": 190, "loss": 0.012, "learning_rate": 5.034112248817685e-07, "epoch": 3.935691318327974, "percentage": 80.53, "elapsed_time": "0:33:42", "remaining_time": "0:08:08", "throughput": "481.54", "total_tokens": 973696}
-{"current_steps": 154, "total_steps": 190, "loss": 0.023, "learning_rate": 4.774575140626317e-07, "epoch": 3.9614147909967845, "percentage": 81.05, "elapsed_time": "0:33:55", "remaining_time": "0:07:55", "throughput": "481.59", "total_tokens": 980144}
-{"current_steps": 155, "total_steps": 190, "loss": 0.0156, "learning_rate": 4.5211988927752026e-07, "epoch": 3.987138263665595, "percentage": 81.58, "elapsed_time": "0:34:08", "remaining_time": "0:07:42", "throughput": "481.51", "total_tokens": 986352}
-{"current_steps": 156, "total_steps": 190, "loss": 0.0009, "learning_rate": 4.27406068612396e-07, "epoch": 4.012861736334405, "percentage": 82.11, "elapsed_time": "0:34:21", "remaining_time": "0:07:29", "throughput": "481.59", "total_tokens": 992880}
-{"current_steps": 157, "total_steps": 190, "loss": 0.0017, "learning_rate": 4.033235801364402e-07, "epoch": 4.038585209003215, "percentage": 82.63, "elapsed_time": "0:34:34", "remaining_time": "0:07:16", "throughput": "481.58", "total_tokens": 999200}
-{"current_steps": 158, "total_steps": 190, "loss": 0.0015, "learning_rate": 3.798797596089351e-07, "epoch": 4.064308681672026, "percentage": 83.16, "elapsed_time": "0:34:48", "remaining_time": "0:07:02", "throughput": "481.58", "total_tokens": 1005568}
-{"current_steps": 159, "total_steps": 190, "loss": 0.0035, "learning_rate": 3.5708174824471947e-07, "epoch": 4.090032154340836, "percentage": 83.68, "elapsed_time": "0:35:01", "remaining_time": "0:06:49", "throughput": "481.49", "total_tokens": 1011728}
-{"current_steps": 160, "total_steps": 190, "loss": 0.0016, "learning_rate": 3.3493649053890325e-07, "epoch": 4.115755627009646, "percentage": 84.21, "elapsed_time": "0:35:14", "remaining_time": "0:06:36", "throughput": "481.58", "total_tokens": 1018272}
-{"current_steps": 161, "total_steps": 190, "loss": 0.0028, "learning_rate": 3.134507321515107e-07, "epoch": 4.141479099678457, "percentage": 84.74, "elapsed_time": "0:35:27", "remaining_time": "0:06:23", "throughput": "481.61", "total_tokens": 1024688}
-{"current_steps": 162, "total_steps": 190, "loss": 0.0006, "learning_rate": 2.9263101785268253e-07, "epoch": 4.167202572347267, "percentage": 85.26, "elapsed_time": "0:35:40", "remaining_time": "0:06:10", "throughput": "481.65", "total_tokens": 1031152}
-{"current_steps": 163, "total_steps": 190, "loss": 0.0013, "learning_rate": 2.7248368952908055e-07, "epoch": 4.192926045016077, "percentage": 85.79, "elapsed_time": "0:35:54", "remaining_time": "0:05:56", "throughput": "481.71", "total_tokens": 1037632}
-{"current_steps": 164, "total_steps": 190, "loss": 0.0006, "learning_rate": 2.53014884252083e-07, "epoch": 4.218649517684887, "percentage": 86.32, "elapsed_time": "0:36:07", "remaining_time": "0:05:43", "throughput": "481.69", "total_tokens": 1043936}
-{"current_steps": 165, "total_steps": 190, "loss": 0.0017, "learning_rate": 2.3423053240837518e-07, "epoch": 4.244372990353698, "percentage": 86.84, "elapsed_time": "0:36:20", "remaining_time": "0:05:30", "throughput": "481.57", "total_tokens": 1050048}
-{"current_steps": 166, "total_steps": 190, "loss": 0.0004, "learning_rate": 2.1613635589349756e-07, "epoch": 4.270096463022508, "percentage": 87.37, "elapsed_time": "0:36:33", "remaining_time": "0:05:17", "throughput": "481.61", "total_tokens": 1056496}
-{"current_steps": 167, "total_steps": 190, "loss": 0.0049, "learning_rate": 1.9873786636889908e-07, "epoch": 4.295819935691318, "percentage": 87.89, "elapsed_time": "0:36:46", "remaining_time": "0:05:03", "throughput": "481.61", "total_tokens": 1062848}
-{"current_steps": 168, "total_steps": 190, "loss": 0.0071, "learning_rate": 1.8204036358303173e-07, "epoch": 4.321543408360129, "percentage": 88.42, "elapsed_time": "0:37:00", "remaining_time": "0:04:50", "throughput": "481.58", "total_tokens": 1069136}
-{"current_steps": 169, "total_steps": 190, "loss": 0.0011, "learning_rate": 1.6604893375699594e-07, "epoch": 4.347266881028939, "percentage": 88.95, "elapsed_time": "0:37:13", "remaining_time": "0:04:37", "throughput": "481.50", "total_tokens": 1075328}
-{"current_steps": 170, "total_steps": 190, "loss": 0.0004, "learning_rate": 1.507684480352292e-07, "epoch": 4.372990353697749, "percentage": 89.47, "elapsed_time": "0:37:26", "remaining_time": "0:04:24", "throughput": "481.50", "total_tokens": 1081696}
-{"current_steps": 171, "total_steps": 190, "loss": 0.0007, "learning_rate": 1.362035610017079e-07, "epoch": 4.39871382636656, "percentage": 90.0, "elapsed_time": "0:37:39", "remaining_time": "0:04:11", "throughput": "481.53", "total_tokens": 1088112}
-{"current_steps": 172, "total_steps": 190, "loss": 0.0017, "learning_rate": 1.223587092621162e-07, "epoch": 4.42443729903537, "percentage": 90.53, "elapsed_time": "0:37:52", "remaining_time": "0:03:57", "throughput": "481.55", "total_tokens": 1094512}
-{"current_steps": 173, "total_steps": 190, "loss": 0.0007, "learning_rate": 1.0923811009241142e-07, "epoch": 4.45016077170418, "percentage": 91.05, "elapsed_time": "0:38:06", "remaining_time": "0:03:44", "throughput": "481.62", "total_tokens": 1101040}
-{"current_steps": 174, "total_steps": 190, "loss": 0.0003, "learning_rate": 9.684576015420277e-08, "epoch": 4.47588424437299, "percentage": 91.58, "elapsed_time": "0:38:19", "remaining_time": "0:03:31", "throughput": "481.52", "total_tokens": 1107168}
-{"current_steps": 175, "total_steps": 190, "loss": 0.0046, "learning_rate": 8.518543427732951e-08, "epoch": 4.501607717041801, "percentage": 92.11, "elapsed_time": "0:38:32", "remaining_time": "0:03:18", "throughput": "481.47", "total_tokens": 1113408}
-{"current_steps": 176, "total_steps": 190, "loss": 0.0038, "learning_rate": 7.426068431000883e-08, "epoch": 4.527331189710611, "percentage": 92.63, "elapsed_time": "0:38:45", "remaining_time": "0:03:04", "throughput": "481.39", "total_tokens": 1119568}
-{"current_steps": 177, "total_steps": 190, "loss": 0.0036, "learning_rate": 6.407483803691216e-08, "epoch": 4.553054662379421, "percentage": 93.16, "elapsed_time": "0:38:58", "remaining_time": "0:02:51", "throughput": "481.44", "total_tokens": 1126048}
-{"current_steps": 178, "total_steps": 190, "loss": 0.0056, "learning_rate": 5.463099816548578e-08, "epoch": 4.578778135048232, "percentage": 93.68, "elapsed_time": "0:39:12", "remaining_time": "0:02:38", "throughput": "481.44", "total_tokens": 1132400}
-{"current_steps": 179, "total_steps": 190, "loss": 0.0057, "learning_rate": 4.593204138084006e-08, "epoch": 4.604501607717042, "percentage": 94.21, "elapsed_time": "0:39:25", "remaining_time": "0:02:25", "throughput": "481.48", "total_tokens": 1138832}
-{"current_steps": 180, "total_steps": 190, "loss": 0.002, "learning_rate": 3.798061746947995e-08, "epoch": 4.630225080385852, "percentage": 94.74, "elapsed_time": "0:39:38", "remaining_time": "0:02:12", "throughput": "481.54", "total_tokens": 1145344}
-{"current_steps": 181, "total_steps": 190, "loss": 0.0003, "learning_rate": 3.077914851215585e-08, "epoch": 4.655948553054662, "percentage": 95.26, "elapsed_time": "0:39:51", "remaining_time": "0:01:58", "throughput": "481.47", "total_tokens": 1151536}
-{"current_steps": 182, "total_steps": 190, "loss": 0.0002, "learning_rate": 2.4329828146074096e-08, "epoch": 4.681672025723473, "percentage": 95.79, "elapsed_time": "0:40:04", "remaining_time": "0:01:45", "throughput": "481.39", "total_tokens": 1157696}
-{"current_steps": 183, "total_steps": 190, "loss": 0.0043, "learning_rate": 1.8634620896695044e-08, "epoch": 4.707395498392283, "percentage": 96.32, "elapsed_time": "0:40:18", "remaining_time": "0:01:32", "throughput": "481.25", "total_tokens": 1163712}
-{"current_steps": 184, "total_steps": 190, "loss": 0.0002, "learning_rate": 1.3695261579316776e-08, "epoch": 4.733118971061093, "percentage": 96.84, "elapsed_time": "0:40:31", "remaining_time": "0:01:19", "throughput": "481.18", "total_tokens": 1169888}
-{"current_steps": 185, "total_steps": 190, "loss": 0.0013, "learning_rate": 9.513254770636138e-09, "epoch": 4.758842443729904, "percentage": 97.37, "elapsed_time": "0:40:44", "remaining_time": "0:01:06", "throughput": "481.19", "total_tokens": 1176272}
-{"current_steps": 186, "total_steps": 190, "loss": 0.0023, "learning_rate": 6.089874350439507e-09, "epoch": 4.784565916398714, "percentage": 97.89, "elapsed_time": "0:40:57", "remaining_time": "0:00:52", "throughput": "481.22", "total_tokens": 1182688}
-{"current_steps": 187, "total_steps": 190, "loss": 0.0002, "learning_rate": 3.4261631135654174e-09, "epoch": 4.810289389067524, "percentage": 98.42, "elapsed_time": "0:41:10", "remaining_time": "0:00:39", "throughput": "481.35", "total_tokens": 1189360}
-{"current_steps": 188, "total_steps": 190, "loss": 0.0015, "learning_rate": 1.5229324522605949e-09, "epoch": 4.836012861736334, "percentage": 98.95, "elapsed_time": "0:41:24", "remaining_time": "0:00:26", "throughput": "481.32", "total_tokens": 1195632}
-{"current_steps": 189, "total_steps": 190, "loss": 0.0002, "learning_rate": 3.8076210902182607e-10, "epoch": 4.861736334405145, "percentage": 99.47, "elapsed_time": "0:41:37", "remaining_time": "0:00:13", "throughput": "481.33", "total_tokens": 1202016}
-{"current_steps": 190, "total_steps": 190, "loss": 0.0028, "learning_rate": 0.0, "epoch": 4.887459807073955, "percentage": 100.0, "elapsed_time": "0:41:50", "remaining_time": "0:00:00", "throughput": "481.34", "total_tokens": 1208400}
-{"current_steps": 190, "total_steps": 190, "epoch": 4.887459807073955, "percentage": 100.0, "elapsed_time": "0:42:51", "remaining_time": "0:00:00", "throughput": "469.99", "total_tokens": 1208400}
+{"current_steps": 5, "total_steps": 78, "percentage": 6.41, "elapsed_time": "0:00:00", "remaining_time": "0:00:05"}
+{"current_steps": 10, "total_steps": 78, "percentage": 12.82, "elapsed_time": "0:00:00", "remaining_time": "0:00:05"}
+{"current_steps": 15, "total_steps": 78, "percentage": 19.23, "elapsed_time": "0:00:01", "remaining_time": "0:00:04"}
+{"current_steps": 20, "total_steps": 78, "percentage": 25.64, "elapsed_time": "0:00:01", "remaining_time": "0:00:04"}
+{"current_steps": 25, "total_steps": 78, "percentage": 32.05, "elapsed_time": "0:00:01", "remaining_time": "0:00:04"}
+{"current_steps": 30, "total_steps": 78, "percentage": 38.46, "elapsed_time": "0:00:02", "remaining_time": "0:00:03"}
+{"current_steps": 35, "total_steps": 78, "percentage": 44.87, "elapsed_time": "0:00:02", "remaining_time": "0:00:03"}
+{"current_steps": 40, "total_steps": 78, "percentage": 51.28, "elapsed_time": "0:00:03", "remaining_time": "0:00:02"}
+{"current_steps": 45, "total_steps": 78, "percentage": 57.69, "elapsed_time": "0:00:03", "remaining_time": "0:00:02"}
+{"current_steps": 50, "total_steps": 78, "percentage": 64.1, "elapsed_time": "0:00:03", "remaining_time": "0:00:02"}
+{"current_steps": 55, "total_steps": 78, "percentage": 70.51, "elapsed_time": "0:00:04", "remaining_time": "0:00:01"}
+{"current_steps": 60, "total_steps": 78, "percentage": 76.92, "elapsed_time": "0:00:04", "remaining_time": "0:00:01"}
+{"current_steps": 65, "total_steps": 78, "percentage": 83.33, "elapsed_time": "0:00:05", "remaining_time": "0:00:01"}
+{"current_steps": 70, "total_steps": 78, "percentage": 89.74, "elapsed_time": "0:00:05", "remaining_time": "0:00:00"}
+{"current_steps": 75, "total_steps": 78, "percentage": 96.15, "elapsed_time": "0:00:05", "remaining_time": "0:00:00"}
training_args.yaml
CHANGED
@@ -1,30 +1,18 @@
-bf16: true
 cutoff_len: 1024
-dataset:
+dataset: truth_dev_0716
 dataset_dir: data
-
-deepspeed: cache/ds_z2_config.json
-do_train: true
+do_predict: true
 finetuning_type: full
 flash_attn: auto
-
-include_num_input_tokens_seen: true
-learning_rate: 5.0e-06
-logging_steps: 1
-lr_scheduler_type: cosine
-max_grad_norm: 1.0
+max_new_tokens: 512
 max_samples: 100000
-model_name_or_path:
-
-
-
-packing: false
-per_device_train_batch_size: 2
-plot_loss: true
+model_name_or_path: saves/LLaMA3-8B-Chat/full/train_2024-07-23-06-00-05_llama3
+output_dir: saves/LLaMA3-8B-Chat/full/eval_2024-07-23-06-52-5_llama3
+per_device_eval_batch_size: 2
+predict_with_generate: true
 preprocessing_num_workers: 16
 quantization_method: bitsandbytes
-report_to: none
-save_steps: 1000
 stage: sft
+temperature: 0.95
 template: llama3
-
+top_p: 0.7
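The new YAML switches the same SFT pipeline from training to batch prediction: it points model_name_or_path at the checkpoint produced by the training run and enables do_predict with predict_with_generate, which is what wrote the generated_predictions.jsonl and predict_results.json added in this commit. Such a file is typically passed to LLaMA-Factory as `llamafactory-cli train training_args.yaml` (the exact entry point may differ by version). A small sanity-check sketch for a predict-mode config of this shape; the file path and key names below simply mirror the YAML shown above:

import yaml

# Load the predict-mode config and check the invariants batch generation
# relies on: do_predict and predict_with_generate must both be set, and the
# sampling knobs only take effect at generation time.
with open("training_args.yaml") as f:
    args = yaml.safe_load(f)

assert args["do_predict"] and args["predict_with_generate"]
assert args["stage"] == "sft" and args["template"] == "llama3"
print(f"decoding with temperature={args['temperature']}, "
      f"top_p={args['top_p']}, max_new_tokens={args['max_new_tokens']}")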