chchen committed
Commit 3142e70
Parent: 7a971a6

Model save

.ipynb_checkpoints/llama3_lora_sft-checkpoint.yaml ADDED
@@ -0,0 +1,44 @@
+ ### model
+ model_name_or_path: tiiuae/falcon-7b-instruct
+ adapter_name_or_path: chchen/Falcon-7B-Instruct-ORPO
+
+ ### method
+ stage: sft
+ do_train: true
+ finetuning_type: lora
+ lora_target: all
+
+ ### dataset
+ dataset: bct_non_cot_sft_1000
+ dataset_dir: data_private
+ template: falcon
+ cutoff_len: 1024
+ # max_samples: 1000
+ overwrite_cache: true
+ preprocessing_num_workers: 16
+
+ ### output
+ output_dir: saves/Falcon-7B-Instruct/lora/orpo-sft
+ logging_steps: 10
+ save_steps: 500
+ plot_loss: true
+ overwrite_output_dir: true
+ save_total_limit: 3
+ load_best_model_at_end: true
+ push_to_hub: true
+ hub_model_id: chchen/Falcon-7B-Instruct-ORPO-SFT
+
+ ### train
+ per_device_train_batch_size: 2
+ gradient_accumulation_steps: 8
+ learning_rate: 0.000005
+ num_train_epochs: 3.0
+ lr_scheduler_type: cosine
+ warmup_steps: 0.1
+ fp16: true
+
+ ### eval
+ val_size: 0.1
+ per_device_eval_batch_size: 2
+ evaluation_strategy: steps
+ eval_steps: 500
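
This checkpoint file is a Jupyter autosave copy of `llama3_lora_sft.yaml` later in this commit (the two are identical; despite the `llama3` filename, it configures Falcon-7B-Instruct). A config in this format is consumed by the LLaMA-Factory trainer. A minimal launch sketch, assuming LLaMA-Factory is installed (providing `llamafactory-cli`) and the private `bct_non_cot_sft_1000` dataset exists under `data_private/`:

```python
# Minimal sketch: launch the SFT run described by the config above.
# Assumes LLaMA-Factory is installed and the private dataset referenced
# in the config is available locally.
import subprocess

subprocess.run(
    ["llamafactory-cli", "train", "llama3_lora_sft.yaml"],
    check=True,  # raise if the training process exits non-zero
)
```

The equivalent shell invocation would be `llamafactory-cli train llama3_lora_sft.yaml`.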
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: apache-2.0
+ library_name: peft
+ tags:
+ - llama-factory
+ - generated_from_trainer
+ base_model: tiiuae/falcon-7b-instruct
+ model-index:
+ - name: Falcon-7B-Instruct-ORPO-SFT
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Falcon-7B-Instruct-ORPO-SFT
+
+ This model is a fine-tuned version of [tiiuae/falcon-7b-instruct](https://huggingface.co/tiiuae/falcon-7b-instruct) on the bct_non_cot_sft_1000 dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-06
+ - train_batch_size: 2
+ - eval_batch_size: 2
+ - seed: 42
+ - gradient_accumulation_steps: 8
+ - total_train_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_steps: 0.1
+ - num_epochs: 3.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.10.0
+ - Transformers 4.40.1
+ - Pytorch 2.3.0
+ - Datasets 2.19.0
+ - Tokenizers 0.19.1
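
The auto-generated card omits a usage example. A minimal inference sketch under the framework versions listed above (PEFT 0.10.0, Transformers 4.40.1), assuming the adapter repo `chchen/Falcon-7B-Instruct-ORPO-SFT` is accessible and a GPU can hold Falcon-7B in float16:

```python
# Minimal sketch: load the LoRA adapter on top of the Falcon base model.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained(
    "tiiuae/falcon-7b-instruct", torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base, "chchen/Falcon-7B-Instruct-ORPO-SFT")
tokenizer = AutoTokenizer.from_pretrained("chchen/Falcon-7B-Instruct-ORPO-SFT")

# Prompt formatted like the falcon template used during training.
inputs = tokenizer("User: Hello!\nFalcon:", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```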
adapter_config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "tiiuae/falcon-7b-instruct",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 16,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "query_key_value",
+     "dense_h_to_4h",
+     "dense",
+     "dense_4h_to_h"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
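
This JSON records a rank-8, alpha-16 LoRA over all four linear-layer classes in the Falcon block, which is what `lora_target: all` in the training config expands to. A sketch of the equivalent PEFT construction (it mirrors the JSON above; not the exact object LLaMA-Factory built, but the same hyperparameters):

```python
# Sketch: the LoRA configuration above, expressed directly in PEFT.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                    # "r": 8
    lora_alpha=16,          # "lora_alpha": 16
    lora_dropout=0.0,
    bias="none",
    target_modules=[
        "query_key_value",  # fused attention projection in Falcon
        "dense",            # attention output projection
        "dense_h_to_4h",    # MLP up-projection
        "dense_4h_to_h",    # MLP down-projection
    ],
    task_type="CAUSAL_LM",
)
```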
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f996255880a06aa5804440d398a1457ede88212dc72ada3270f593e3bebf4061
+ size 65309632
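
What is stored in Git here is only the LFS pointer (spec version, SHA-256 of the real file, and its size: 65309632 bytes, roughly 65 MB of adapter weights), not the tensors themselves. A sketch for fetching the actual file, assuming the repo is public:

```python
# Sketch: resolve the LFS pointer and download the real safetensors file.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="chchen/Falcon-7B-Instruct-ORPO-SFT",
    filename="adapter_model.safetensors",
)
print(path)  # local cache path of the downloaded weights
```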
llama3_lora_sft.yaml ADDED
@@ -0,0 +1,44 @@
+ ### model
+ model_name_or_path: tiiuae/falcon-7b-instruct
+ adapter_name_or_path: chchen/Falcon-7B-Instruct-ORPO
+
+ ### method
+ stage: sft
+ do_train: true
+ finetuning_type: lora
+ lora_target: all
+
+ ### dataset
+ dataset: bct_non_cot_sft_1000
+ dataset_dir: data_private
+ template: falcon
+ cutoff_len: 1024
+ # max_samples: 1000
+ overwrite_cache: true
+ preprocessing_num_workers: 16
+
+ ### output
+ output_dir: saves/Falcon-7B-Instruct/lora/orpo-sft
+ logging_steps: 10
+ save_steps: 500
+ plot_loss: true
+ overwrite_output_dir: true
+ save_total_limit: 3
+ load_best_model_at_end: true
+ push_to_hub: true
+ hub_model_id: chchen/Falcon-7B-Instruct-ORPO-SFT
+
+ ### train
+ per_device_train_batch_size: 2
+ gradient_accumulation_steps: 8
+ learning_rate: 0.000005
+ num_train_epochs: 3.0
+ lr_scheduler_type: cosine
+ warmup_steps: 0.1
+ fp16: true
+
+ ### eval
+ val_size: 0.1
+ per_device_eval_batch_size: 2
+ evaluation_strategy: steps
+ eval_steps: 500
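
The step count in trainer_log.jsonl below follows directly from this config. A quick arithmetic check, assuming the dataset holds exactly 1000 examples and the trainer drops the last partial batch:

```python
# Back-of-the-envelope check of total optimizer steps.
samples = 1000
train_samples = int(samples * (1 - 0.1))            # val_size: 0.1 -> 900
effective_batch = 2 * 8                             # per-device batch * grad accum = 16
steps_per_epoch = train_samples // effective_batch  # 900 // 16 = 56
total_steps = steps_per_epoch * 3                   # num_train_epochs: 3.0 -> 168
print(total_steps)  # 168, matching "total_steps": 168 in trainer_log.jsonl
```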
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "additional_special_tokens": [
+     ">>TITLE<<",
+     ">>ABSTRACT<<",
+     ">>INTRODUCTION<<",
+     ">>SUMMARY<<",
+     ">>COMMENT<<",
+     ">>ANSWER<<",
+     ">>QUESTION<<",
+     ">>DOMAIN<<",
+     ">>PREFIX<<",
+     ">>SUFFIX<<",
+     ">>MIDDLE<<"
+   ],
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": "<|endoftext|>"
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
tokenizer_config.json ADDED
@@ -0,0 +1,126 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "0": {
+       "content": ">>TITLE<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": ">>ABSTRACT<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": ">>INTRODUCTION<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": ">>SUMMARY<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "4": {
+       "content": ">>COMMENT<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "5": {
+       "content": ">>ANSWER<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "6": {
+       "content": ">>QUESTION<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "7": {
+       "content": ">>DOMAIN<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "8": {
+       "content": ">>PREFIX<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "9": {
+       "content": ">>SUFFIX<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "10": {
+       "content": ">>MIDDLE<<",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "11": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     ">>TITLE<<",
+     ">>ABSTRACT<<",
+     ">>INTRODUCTION<<",
+     ">>SUMMARY<<",
+     ">>COMMENT<<",
+     ">>ANSWER<<",
+     ">>QUESTION<<",
+     ">>DOMAIN<<",
+     ">>PREFIX<<",
+     ">>SUFFIX<<",
+     ">>MIDDLE<<"
+   ],
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ 'User: ' + content + '\nFalcon:' }}{% elif message['role'] == 'assistant' %}{{ content + '\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": true,
+   "eos_token": "<|endoftext|>",
+   "model_input_names": [
+     "input_ids",
+     "attention_mask"
+   ],
+   "model_max_length": 2048,
+   "pad_token": "<|endoftext|>",
+   "padding_side": "right",
+   "split_special_tokens": false,
+   "tokenizer_class": "PreTrainedTokenizerFast"
+ }
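
The `chat_template` above renders user turns as `User: ...` and cues the model with `Falcon:`. A minimal sketch of how it expands, assuming the tokenizer files in this commit:

```python
# Sketch: render the Falcon chat template defined above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("chchen/Falcon-7B-Instruct-ORPO-SFT")
text = tok.apply_chat_template(
    [{"role": "user", "content": "What is LoRA?"}],
    tokenize=False,
)
print(text)  # "User: What is LoRA?\nFalcon:"
```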
trainer_log.jsonl ADDED
@@ -0,0 +1,17 @@
+ {"current_steps": 10, "total_steps": 168, "loss": 0.8422, "learning_rate": 4.957230266673969e-06, "epoch": 0.17777777777777778, "percentage": 5.95, "elapsed_time": "0:00:16", "remaining_time": "0:04:23"}
+ {"current_steps": 20, "total_steps": 168, "loss": 0.7171, "learning_rate": 4.828686741593921e-06, "epoch": 0.35555555555555557, "percentage": 11.9, "elapsed_time": "0:00:32", "remaining_time": "0:04:00"}
+ {"current_steps": 30, "total_steps": 168, "loss": 0.5551, "learning_rate": 4.618852307232078e-06, "epoch": 0.5333333333333333, "percentage": 17.86, "elapsed_time": "0:00:48", "remaining_time": "0:03:42"}
+ {"current_steps": 40, "total_steps": 168, "loss": 0.4059, "learning_rate": 4.335051964269395e-06, "epoch": 0.7111111111111111, "percentage": 23.81, "elapsed_time": "0:01:04", "remaining_time": "0:03:24"}
+ {"current_steps": 50, "total_steps": 168, "loss": 0.3053, "learning_rate": 3.987192750660719e-06, "epoch": 0.8888888888888888, "percentage": 29.76, "elapsed_time": "0:01:19", "remaining_time": "0:03:08"}
+ {"current_steps": 60, "total_steps": 168, "loss": 0.2825, "learning_rate": 3.587417902020876e-06, "epoch": 1.0666666666666667, "percentage": 35.71, "elapsed_time": "0:01:35", "remaining_time": "0:02:52"}
+ {"current_steps": 70, "total_steps": 168, "loss": 0.2082, "learning_rate": 3.1496829497545268e-06, "epoch": 1.2444444444444445, "percentage": 41.67, "elapsed_time": "0:01:51", "remaining_time": "0:02:35"}
+ {"current_steps": 80, "total_steps": 168, "loss": 0.1955, "learning_rate": 2.6892685546987724e-06, "epoch": 1.4222222222222223, "percentage": 47.62, "elapsed_time": "0:02:07", "remaining_time": "0:02:19"}
+ {"current_steps": 90, "total_steps": 168, "loss": 0.2161, "learning_rate": 2.2222470825144806e-06, "epoch": 1.6, "percentage": 53.57, "elapsed_time": "0:02:22", "remaining_time": "0:02:03"}
+ {"current_steps": 100, "total_steps": 168, "loss": 0.169, "learning_rate": 1.7649215418673847e-06, "epoch": 1.7777777777777777, "percentage": 59.52, "elapsed_time": "0:02:38", "remaining_time": "0:01:47"}
+ {"current_steps": 110, "total_steps": 168, "loss": 0.2051, "learning_rate": 1.3332564712129845e-06, "epoch": 1.9555555555555557, "percentage": 65.48, "elapsed_time": "0:02:54", "remaining_time": "0:01:31"}
+ {"current_steps": 120, "total_steps": 168, "loss": 0.2007, "learning_rate": 9.423206410612498e-07, "epoch": 2.1333333333333333, "percentage": 71.43, "elapsed_time": "0:03:09", "remaining_time": "0:01:15"}
+ {"current_steps": 130, "total_steps": 168, "loss": 0.1866, "learning_rate": 6.057610261367044e-07, "epoch": 2.311111111111111, "percentage": 77.38, "elapsed_time": "0:03:25", "remaining_time": "0:01:00"}
+ {"current_steps": 140, "total_steps": 168, "loss": 0.2212, "learning_rate": 3.3532641026504415e-07, "epoch": 2.488888888888889, "percentage": 83.33, "elapsed_time": "0:03:41", "remaining_time": "0:00:44"}
+ {"current_steps": 150, "total_steps": 168, "loss": 0.1776, "learning_rate": 1.4045725421448332e-07, "epoch": 2.6666666666666665, "percentage": 89.29, "elapsed_time": "0:03:56", "remaining_time": "0:00:28"}
+ {"current_steps": 160, "total_steps": 168, "loss": 0.1686, "learning_rate": 2.7956143581177874e-08, "epoch": 2.8444444444444446, "percentage": 95.24, "elapsed_time": "0:04:11", "remaining_time": "0:00:12"}
+ {"current_steps": 168, "total_steps": 168, "epoch": 2.986666666666667, "percentage": 100.0, "elapsed_time": "0:04:24", "remaining_time": "0:00:00"}
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56cfcd67ac4c548c481e0e5a8e0735d022ec22fd4a6e0a561b75eeba041857e0
+ size 5240