gsmyrnis committed
Commit ca075ab
1 Parent(s): 8527fac

Model save

README.md ADDED
@@ -0,0 +1,67 @@
+ ---
+ library_name: transformers
+ license: llama3.1
+ base_model: meta-llama/Meta-Llama-3.1-8B
+ tags:
+ - llama-factory
+ - generated_from_trainer
+ model-index:
+ - name: llama3-1_8b_baseline_infinity_instruct
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # llama3-1_8b_baseline_infinity_instruct
+
+ This model is a fine-tuned version of [meta-llama/Meta-Llama-3.1-8B](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.9010
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 32
+ - total_train_batch_size: 512
+ - total_eval_batch_size: 256
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: constant
+ - lr_scheduler_warmup_ratio: 0.1
+ - lr_scheduler_warmup_steps: 1738
+ - num_epochs: 3.0
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 0.9018        | 1.0   | 423  | 0.9103          |
+ | 0.8586        | 2.0   | 846  | 0.9001          |
+ | 0.8143        | 3.0   | 1269 | 0.9010          |
+
+
+ ### Framework versions
+
+ - Transformers 4.44.2
+ - Pytorch 2.4.0
+ - Datasets 2.21.0
+ - Tokenizers 0.19.1
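For reference, a minimal loading sketch, assuming the checkpoint is published under a repository id like `gsmyrnis/llama3-1_8b_baseline_infinity_instruct` (hypothetical; substitute the actual repo id) and that the standard `transformers` loading path applies:

```python
# Minimal usage sketch. The repo id below is an assumption inferred from the
# model name in the card; replace it with the actual Hugging Face repository id.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "gsmyrnis/llama3-1_8b_baseline_infinity_instruct"  # hypothetical repo id

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype="auto",   # load in the precision saved with the checkpoint
    device_map="auto",    # requires `accelerate` for automatic device placement
)

prompt = "Summarize the benefits of instruction tuning in two sentences."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```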
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 128000,
+   "do_sample": true,
+   "eos_token_id": 128001,
+   "temperature": 0.6,
+   "top_p": 0.9,
+   "transformers_version": "4.44.2"
+ }
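These defaults are picked up automatically by `model.generate`; a short sketch of setting them explicitly through `GenerationConfig`, e.g. to reproduce or override them outside the saved file:

```python
# Sketch only: builds a GenerationConfig mirroring the values in
# generation_config.json above, then applies it at generation time.
from transformers import GenerationConfig

gen_config = GenerationConfig(
    bos_token_id=128000,
    eos_token_id=128001,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)

# outputs = model.generate(**inputs, generation_config=gen_config)
```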
model-00001-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0b9de2a6033f253181dfc5839a3975f46c30ab6314e2b749e62ddadf701daa72
+ oid sha256:537c809d791b36a52236c4e112ee8df77fcc3eb0e22c362a1150739bb92a63e1
  size 4976698672
model-00002-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9da0f6bc1e5a6188ed9e32371afa146a8b0145e6001cd45099d9f1a852bb008a
+ oid sha256:148ba221ffc8e8cfabff0acc6485c8c3ed98b52494958df466c27117eb05c983
  size 4999802720
model-00003-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4a5636996dfdb10b6901afb8ba17208ac60274d6efa2ddcd253c5412363f570d
+ oid sha256:21a9d546ada77f79cd23b313febd6801f4c87e430757678ebb3e58f496d322a4
  size 4915916176
model-00004-of-00004.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2eb5a25fde479d1dc1fff7586952af9caf53bc8d93e657d9813dd2838bf64122
+ oid sha256:215bfb79492237048d85f6745bdd84c0fb09a31057f4020be504028602757617
  size 1168138808
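The weight shards are stored via Git LFS, so each pointer records a sha256 oid and a byte size. A small sketch (local filename assumed) for checking a downloaded shard against its pointer:

```python
# Sketch: verify a downloaded shard against the sha256 oid from its LFS pointer.
# The expected hash below is copied from the model-00004-of-00004 pointer above.
import hashlib

def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "215bfb79492237048d85f6745bdd84c0fb09a31057f4020be504028602757617"
actual = sha256_of("model-00004-of-00004.safetensors")
print("match" if actual == expected else f"mismatch: {actual}")
```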
trainer_log.jsonl CHANGED
@@ -87,3 +87,44 @@
  {"current_steps": 850, "total_steps": 1269, "loss": 0.8393, "learning_rate": 5e-06, "epoch": 2.0094562647754137, "percentage": 66.98, "elapsed_time": "1:38:08", "remaining_time": "0:48:22"}
  {"current_steps": 860, "total_steps": 1269, "loss": 0.8071, "learning_rate": 5e-06, "epoch": 2.033096926713948, "percentage": 67.77, "elapsed_time": "1:39:12", "remaining_time": "0:47:10"}
  {"current_steps": 870, "total_steps": 1269, "loss": 0.8083, "learning_rate": 5e-06, "epoch": 2.0567375886524824, "percentage": 68.56, "elapsed_time": "1:40:14", "remaining_time": "0:45:58"}
+ {"current_steps": 880, "total_steps": 1269, "loss": 0.8135, "learning_rate": 5e-06, "epoch": 2.0803782505910164, "percentage": 69.35, "elapsed_time": "1:41:17", "remaining_time": "0:44:46"}
+ {"current_steps": 890, "total_steps": 1269, "loss": 0.8104, "learning_rate": 5e-06, "epoch": 2.104018912529551, "percentage": 70.13, "elapsed_time": "1:42:19", "remaining_time": "0:43:34"}
+ {"current_steps": 900, "total_steps": 1269, "loss": 0.8155, "learning_rate": 5e-06, "epoch": 2.127659574468085, "percentage": 70.92, "elapsed_time": "1:43:22", "remaining_time": "0:42:23"}
+ {"current_steps": 910, "total_steps": 1269, "loss": 0.8108, "learning_rate": 5e-06, "epoch": 2.1513002364066196, "percentage": 71.71, "elapsed_time": "1:44:26", "remaining_time": "0:41:12"}
+ {"current_steps": 920, "total_steps": 1269, "loss": 0.808, "learning_rate": 5e-06, "epoch": 2.1749408983451537, "percentage": 72.5, "elapsed_time": "1:45:29", "remaining_time": "0:40:00"}
+ {"current_steps": 930, "total_steps": 1269, "loss": 0.8152, "learning_rate": 5e-06, "epoch": 2.198581560283688, "percentage": 73.29, "elapsed_time": "1:46:31", "remaining_time": "0:38:49"}
+ {"current_steps": 940, "total_steps": 1269, "loss": 0.8114, "learning_rate": 5e-06, "epoch": 2.2222222222222223, "percentage": 74.07, "elapsed_time": "1:47:34", "remaining_time": "0:37:38"}
+ {"current_steps": 950, "total_steps": 1269, "loss": 0.8133, "learning_rate": 5e-06, "epoch": 2.2458628841607564, "percentage": 74.86, "elapsed_time": "1:48:36", "remaining_time": "0:36:28"}
+ {"current_steps": 960, "total_steps": 1269, "loss": 0.815, "learning_rate": 5e-06, "epoch": 2.269503546099291, "percentage": 75.65, "elapsed_time": "1:49:39", "remaining_time": "0:35:17"}
+ {"current_steps": 970, "total_steps": 1269, "loss": 0.8096, "learning_rate": 5e-06, "epoch": 2.293144208037825, "percentage": 76.44, "elapsed_time": "1:50:41", "remaining_time": "0:34:07"}
+ {"current_steps": 980, "total_steps": 1269, "loss": 0.8098, "learning_rate": 5e-06, "epoch": 2.3167848699763596, "percentage": 77.23, "elapsed_time": "1:51:44", "remaining_time": "0:32:57"}
+ {"current_steps": 990, "total_steps": 1269, "loss": 0.8124, "learning_rate": 5e-06, "epoch": 2.3404255319148937, "percentage": 78.01, "elapsed_time": "1:52:46", "remaining_time": "0:31:46"}
+ {"current_steps": 1000, "total_steps": 1269, "loss": 0.8095, "learning_rate": 5e-06, "epoch": 2.3640661938534278, "percentage": 78.8, "elapsed_time": "1:53:49", "remaining_time": "0:30:37"}
+ {"current_steps": 1010, "total_steps": 1269, "loss": 0.8118, "learning_rate": 5e-06, "epoch": 2.3877068557919623, "percentage": 79.59, "elapsed_time": "1:54:52", "remaining_time": "0:29:27"}
+ {"current_steps": 1020, "total_steps": 1269, "loss": 0.8124, "learning_rate": 5e-06, "epoch": 2.4113475177304964, "percentage": 80.38, "elapsed_time": "1:55:54", "remaining_time": "0:28:17"}
+ {"current_steps": 1030, "total_steps": 1269, "loss": 0.8129, "learning_rate": 5e-06, "epoch": 2.434988179669031, "percentage": 81.17, "elapsed_time": "1:56:57", "remaining_time": "0:27:08"}
+ {"current_steps": 1040, "total_steps": 1269, "loss": 0.814, "learning_rate": 5e-06, "epoch": 2.458628841607565, "percentage": 81.95, "elapsed_time": "1:57:59", "remaining_time": "0:25:58"}
+ {"current_steps": 1050, "total_steps": 1269, "loss": 0.809, "learning_rate": 5e-06, "epoch": 2.482269503546099, "percentage": 82.74, "elapsed_time": "1:59:02", "remaining_time": "0:24:49"}
+ {"current_steps": 1060, "total_steps": 1269, "loss": 0.8192, "learning_rate": 5e-06, "epoch": 2.5059101654846336, "percentage": 83.53, "elapsed_time": "2:00:04", "remaining_time": "0:23:40"}
+ {"current_steps": 1070, "total_steps": 1269, "loss": 0.8177, "learning_rate": 5e-06, "epoch": 2.5295508274231677, "percentage": 84.32, "elapsed_time": "2:01:07", "remaining_time": "0:22:31"}
+ {"current_steps": 1080, "total_steps": 1269, "loss": 0.817, "learning_rate": 5e-06, "epoch": 2.5531914893617023, "percentage": 85.11, "elapsed_time": "2:02:09", "remaining_time": "0:21:22"}
+ {"current_steps": 1090, "total_steps": 1269, "loss": 0.8118, "learning_rate": 5e-06, "epoch": 2.5768321513002364, "percentage": 85.89, "elapsed_time": "2:03:12", "remaining_time": "0:20:13"}
+ {"current_steps": 1100, "total_steps": 1269, "loss": 0.8218, "learning_rate": 5e-06, "epoch": 2.6004728132387704, "percentage": 86.68, "elapsed_time": "2:04:14", "remaining_time": "0:19:05"}
+ {"current_steps": 1110, "total_steps": 1269, "loss": 0.8142, "learning_rate": 5e-06, "epoch": 2.624113475177305, "percentage": 87.47, "elapsed_time": "2:05:17", "remaining_time": "0:17:56"}
+ {"current_steps": 1120, "total_steps": 1269, "loss": 0.8194, "learning_rate": 5e-06, "epoch": 2.6477541371158395, "percentage": 88.26, "elapsed_time": "2:06:21", "remaining_time": "0:16:48"}
+ {"current_steps": 1130, "total_steps": 1269, "loss": 0.8206, "learning_rate": 5e-06, "epoch": 2.6713947990543736, "percentage": 89.05, "elapsed_time": "2:07:24", "remaining_time": "0:15:40"}
+ {"current_steps": 1140, "total_steps": 1269, "loss": 0.8192, "learning_rate": 5e-06, "epoch": 2.6950354609929077, "percentage": 89.83, "elapsed_time": "2:08:26", "remaining_time": "0:14:32"}
+ {"current_steps": 1150, "total_steps": 1269, "loss": 0.8165, "learning_rate": 5e-06, "epoch": 2.7186761229314422, "percentage": 90.62, "elapsed_time": "2:09:29", "remaining_time": "0:13:23"}
+ {"current_steps": 1160, "total_steps": 1269, "loss": 0.8205, "learning_rate": 5e-06, "epoch": 2.7423167848699763, "percentage": 91.41, "elapsed_time": "2:10:32", "remaining_time": "0:12:15"}
+ {"current_steps": 1170, "total_steps": 1269, "loss": 0.8139, "learning_rate": 5e-06, "epoch": 2.7659574468085104, "percentage": 92.2, "elapsed_time": "2:11:38", "remaining_time": "0:11:08"}
+ {"current_steps": 1180, "total_steps": 1269, "loss": 0.8119, "learning_rate": 5e-06, "epoch": 2.789598108747045, "percentage": 92.99, "elapsed_time": "2:12:47", "remaining_time": "0:10:00"}
+ {"current_steps": 1190, "total_steps": 1269, "loss": 0.8206, "learning_rate": 5e-06, "epoch": 2.813238770685579, "percentage": 93.77, "elapsed_time": "2:13:49", "remaining_time": "0:08:53"}
+ {"current_steps": 1200, "total_steps": 1269, "loss": 0.8144, "learning_rate": 5e-06, "epoch": 2.8368794326241136, "percentage": 94.56, "elapsed_time": "2:14:53", "remaining_time": "0:07:45"}
+ {"current_steps": 1210, "total_steps": 1269, "loss": 0.8179, "learning_rate": 5e-06, "epoch": 2.8605200945626477, "percentage": 95.35, "elapsed_time": "2:15:57", "remaining_time": "0:06:37"}
+ {"current_steps": 1220, "total_steps": 1269, "loss": 0.8157, "learning_rate": 5e-06, "epoch": 2.884160756501182, "percentage": 96.14, "elapsed_time": "2:16:59", "remaining_time": "0:05:30"}
+ {"current_steps": 1230, "total_steps": 1269, "loss": 0.8162, "learning_rate": 5e-06, "epoch": 2.9078014184397163, "percentage": 96.93, "elapsed_time": "2:18:01", "remaining_time": "0:04:22"}
+ {"current_steps": 1240, "total_steps": 1269, "loss": 0.8133, "learning_rate": 5e-06, "epoch": 2.9314420803782504, "percentage": 97.71, "elapsed_time": "2:19:04", "remaining_time": "0:03:15"}
+ {"current_steps": 1250, "total_steps": 1269, "loss": 0.8167, "learning_rate": 5e-06, "epoch": 2.955082742316785, "percentage": 98.5, "elapsed_time": "2:20:06", "remaining_time": "0:02:07"}
+ {"current_steps": 1260, "total_steps": 1269, "loss": 0.8143, "learning_rate": 5e-06, "epoch": 2.978723404255319, "percentage": 99.29, "elapsed_time": "2:21:10", "remaining_time": "0:01:00"}
+ {"current_steps": 1269, "total_steps": 1269, "eval_loss": 0.9010276198387146, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "2:26:36", "remaining_time": "0:00:00"}
+ {"current_steps": 1269, "total_steps": 1269, "epoch": 3.0, "percentage": 100.0, "elapsed_time": "2:29:55", "remaining_time": "0:00:00"}
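Each record in `trainer_log.jsonl` is one JSON object per logging step. A small sketch for pulling out the training-loss curve; the final eval/summary records carry no `loss` key and are skipped:

```python
# Sketch: read trainer_log.jsonl and collect (step, loss) pairs for quick
# inspection or plotting; records without a "loss" key are summary entries.
import json

steps, losses = [], []
with open("trainer_log.jsonl") as f:
    for line in f:
        record = json.loads(line)
        if "loss" in record:
            steps.append(record["current_steps"])
            losses.append(record["loss"])

print(f"{len(steps)} logged points; last train loss {losses[-1]:.4f} at step {steps[-1]}")
```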