ZhangShenao committed on
Commit
d24199c
·
verified ·
1 Parent(s): 13362df

Model save

Browse files
README.md ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ library_name: transformers
3
+ license: llama3.2
4
+ base_model: ZhangShenao/SELM-Llama-3.2-3B-Instruct-re-00-iter-1
5
+ tags:
6
+ - trl
7
+ - dpo
8
+ - generated_from_trainer
9
+ model-index:
10
+ - name: SELM-Llama-3.2-3B-Instruct-re-00-iter-2
11
+ results: []
12
+ ---
13
+
14
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
15
+ should probably proofread and complete it, then remove this comment. -->
16
+
17
+ # SELM-Llama-3.2-3B-Instruct-re-00-iter-2
18
+
19
+ This model is a fine-tuned version of [ZhangShenao/SELM-Llama-3.2-3B-Instruct-re-00-iter-1](https://huggingface.co/ZhangShenao/SELM-Llama-3.2-3B-Instruct-re-00-iter-1) on an unspecified dataset.
20
+
21
+ ## Model description
22
+
23
+ More information needed
24
+
25
+ ## Intended uses & limitations
26
+
27
+ More information needed
28
+
29
+ ## Training and evaluation data
30
+
31
+ More information needed
32
+
33
+ ## Training procedure
34
+
35
+ ### Training hyperparameters
36
+
37
+ The following hyperparameters were used during training:
38
+ - learning_rate: 3e-07
39
+ - train_batch_size: 2
40
+ - eval_batch_size: 2
41
+ - seed: 42
42
+ - distributed_type: multi-GPU
43
+ - num_devices: 8
44
+ - gradient_accumulation_steps: 8
45
+ - total_train_batch_size: 128
46
+ - total_eval_batch_size: 16
47
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
48
+ - lr_scheduler_type: cosine
49
+ - lr_scheduler_warmup_ratio: 0.1
50
+ - num_epochs: 1
51
+
52
+ ### Training results
53
+
54
+
55
+
56
+ ### Framework versions
57
+
58
+ - Transformers 4.45.0
59
+ - Pytorch 2.5.1+cu124
60
+ - Datasets 2.14.6
61
+ - Tokenizers 0.20.3
all_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9681528662420382,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.4241211351595427,
5
+ "train_runtime": 2419.73,
6
+ "train_samples": 2500,
7
+ "train_samples_per_second": 1.033,
8
+ "train_steps_per_second": 0.008
9
+ }
generation_config.json ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 128000,
3
+ "do_sample": true,
4
+ "eos_token_id": [
5
+ 128001,
6
+ 128008,
7
+ 128009
8
+ ],
9
+ "temperature": 0.6,
10
+ "top_p": 0.9,
11
+ "transformers_version": "4.45.0"
12
+ }
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:f9a1859c20e13c8ea092eb2b494dabf8039ee0c33a24a055442c2057f844ddb6
3
  size 4965799096
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8922eab2d0c8f607cbd09a273044d82a58b110b5a1a3f74f8327e30eba454cf9
3
  size 4965799096
train_results.json ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 0.9681528662420382,
3
+ "total_flos": 0.0,
4
+ "train_loss": 0.4241211351595427,
5
+ "train_runtime": 2419.73,
6
+ "train_samples": 2500,
7
+ "train_samples_per_second": 1.033,
8
+ "train_steps_per_second": 0.008
9
+ }
trainer_state.json ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 0.9681528662420382,
5
+ "eval_steps": 500,
6
+ "global_step": 19,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.050955414012738856,
13
+ "grad_norm": 2006.6288901187897,
14
+ "learning_rate": 1.5e-07,
15
+ "logits/chosen": -0.01424434781074524,
16
+ "logits/rejected": -0.15669912099838257,
17
+ "logps/chosen": -4314.98779296875,
18
+ "logps/pi_response": -2794.38134765625,
19
+ "logps/ref_response": -2794.38134765625,
20
+ "logps/rejected": -3298.451904296875,
21
+ "loss": 0.6933,
22
+ "rewards/accuracies": 0.0,
23
+ "rewards/chosen": 0.0,
24
+ "rewards/margins": 0.0,
25
+ "rewards/rejected": 0.0,
26
+ "step": 1
27
+ },
28
+ {
29
+ "epoch": 0.5095541401273885,
30
+ "grad_norm": 105.62494618629897,
31
+ "learning_rate": 1.638402539194953e-07,
32
+ "logits/chosen": -0.28204211592674255,
33
+ "logits/rejected": -0.29945388436317444,
34
+ "logps/chosen": -3470.167236328125,
35
+ "logps/pi_response": -3325.044921875,
36
+ "logps/ref_response": -3311.615478515625,
37
+ "logps/rejected": -2873.7626953125,
38
+ "loss": 0.443,
39
+ "rewards/accuracies": 0.7638888955116272,
40
+ "rewards/chosen": 1.253909707069397,
41
+ "rewards/margins": 0.959709882736206,
42
+ "rewards/rejected": 0.2941998243331909,
43
+ "step": 10
44
+ },
45
+ {
46
+ "epoch": 0.9681528662420382,
47
+ "step": 19,
48
+ "total_flos": 0.0,
49
+ "train_loss": 0.4241211351595427,
50
+ "train_runtime": 2419.73,
51
+ "train_samples_per_second": 1.033,
52
+ "train_steps_per_second": 0.008
53
+ }
54
+ ],
55
+ "logging_steps": 10,
56
+ "max_steps": 19,
57
+ "num_input_tokens_seen": 0,
58
+ "num_train_epochs": 1,
59
+ "save_steps": 100,
60
+ "stateful_callbacks": {
61
+ "TrainerControl": {
62
+ "args": {
63
+ "should_epoch_stop": false,
64
+ "should_evaluate": false,
65
+ "should_log": false,
66
+ "should_save": true,
67
+ "should_training_stop": true
68
+ },
69
+ "attributes": {}
70
+ }
71
+ },
72
+ "total_flos": 0.0,
73
+ "train_batch_size": 2,
74
+ "trial_name": null,
75
+ "trial_params": null
76
+ }