Felladrin committed on
Commit 30589cc · verified · 1 Parent(s): 72445a9

Upload sample_finetune.py with huggingface_hub

Files changed (1)
  1. sample_finetune.py +214 -0
sample_finetune.py ADDED
@@ -0,0 +1,214 @@
+ import sys
+ import logging
+
+ import datasets
+ from datasets import load_dataset
+ from peft import LoraConfig
+ import torch
+ import transformers
+ from trl import SFTTrainer
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig
+
+ """
+ A simple example of using SFTTrainer and Accelerate to finetune the Phi-4-mini-instruct model. For
+ a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py.
+ This example uses DeepSpeed ZeRO3 to reduce memory usage. The
+ script can be run on V100 or later generation GPUs. Here are some suggestions for
+ further reducing memory consumption:
+   - reduce batch size
+   - decrease lora dimension
+   - restrict lora target modules
+ Please follow these steps to run the script:
+ 1. Install dependencies:
+     conda install -c conda-forge accelerate=1.3.0
+     pip3 install -i https://pypi.org/simple/ bitsandbytes
+     pip3 install peft==0.14.0
+     pip3 install transformers==4.48.1
+     pip3 install trl datasets
+     pip3 install deepspeed
+ 2. Setup accelerate and deepspeed config based on the machine used:
+     accelerate config
+     Here is a sample config for deepspeed zero3:
+     compute_environment: LOCAL_MACHINE
+     debug: false
+     deepspeed_config:
+       gradient_accumulation_steps: 1
+       offload_optimizer_device: none
+       offload_param_device: none
+       zero3_init_flag: true
+       zero3_save_16bit_model: true
+       zero_stage: 3
+     distributed_type: DEEPSPEED
+     downcast_bf16: 'no'
+     enable_cpu_affinity: false
+     machine_rank: 0
+     main_training_function: main
+     mixed_precision: bf16
+     num_machines: 1
+     num_processes: 4
+     rdzv_backend: static
+     same_network: true
+     tpu_env: []
+     tpu_use_cluster: false
+     tpu_use_sudo: false
+     use_cpu: false
+ 3. Check the accelerate config:
+     accelerate env
+ 4. Run the code:
+     accelerate launch sample_finetune.py
+ """
+
+ logger = logging.getLogger(__name__)
+
+
+ ###################
+ # Hyper-parameters
+ ###################
+ training_config = {
+     "bf16": True,
+     "do_eval": False,
+     "learning_rate": 5.0e-06,
+     "log_level": "info",
+     "logging_steps": 20,
+     "logging_strategy": "steps",
+     "lr_scheduler_type": "cosine",
+     "num_train_epochs": 1,
+     "max_steps": -1,
+     "output_dir": "./checkpoint_dir",
+     "overwrite_output_dir": True,
+     "per_device_eval_batch_size": 4,
+     "per_device_train_batch_size": 4,
+     "remove_unused_columns": True,
+     "save_steps": 100,
+     "save_total_limit": 1,
+     "seed": 0,
+     "gradient_checkpointing": True,
+     "gradient_checkpointing_kwargs": {"use_reentrant": False},
+     "gradient_accumulation_steps": 1,
+     "warmup_ratio": 0.2,
+ }
+
+ peft_config = {
+     "r": 16,
+     "lora_alpha": 32,
+     "lora_dropout": 0.05,
+     "bias": "none",
+     "task_type": "CAUSAL_LM",
+     "target_modules": "all-linear",
+     "modules_to_save": None,
+ }
+ train_conf = TrainingArguments(**training_config)
+ peft_conf = LoraConfig(**peft_config)
+
+
+ ###############
+ # Setup logging
+ ###############
+ logging.basicConfig(
+     format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+     datefmt="%Y-%m-%d %H:%M:%S",
+     handlers=[logging.StreamHandler(sys.stdout)],
+ )
+ log_level = train_conf.get_process_log_level()
+ logger.setLevel(log_level)
+ datasets.utils.logging.set_verbosity(log_level)
+ transformers.utils.logging.set_verbosity(log_level)
+ transformers.utils.logging.enable_default_handler()
+ transformers.utils.logging.enable_explicit_format()
+
+ # Log a small summary on each process
+ logger.warning(
+     f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}"
+     + f" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}"
+ )
+ logger.info(f"Training/evaluation parameters {train_conf}")
+ logger.info(f"PEFT parameters {peft_conf}")
+
+
+ ################
+ # Model Loading
+ ################
+ checkpoint_path = "microsoft/Phi-4-mini-instruct"
+ model_kwargs = dict(
+     use_cache=False,
+     trust_remote_code=True,
+     attn_implementation="flash_attention_2",  # load the model with flash-attention support
+     torch_dtype=torch.bfloat16,
+     device_map=None,
+ )
+ model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
+ tokenizer.model_max_length = 2048
+ tokenizer.pad_token = tokenizer.unk_token  # use unk rather than eos token to prevent endless generation
+ tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
+ tokenizer.padding_side = 'right'
+
+
+ ##################
+ # Data Processing
+ ##################
+ def apply_chat_template(
+     example,
+     tokenizer,
+ ):
+     messages = example["messages"]
+     example["text"] = tokenizer.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=False)
+     return example
+
+
+ train_dataset, test_dataset = load_dataset("HuggingFaceH4/ultrachat_200k", split=["train_sft", "test_sft"])
+ column_names = list(train_dataset.features)
+
+ processed_train_dataset = train_dataset.map(
+     apply_chat_template,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+     desc="Applying chat template to train_sft",
+ )
+
+ processed_test_dataset = test_dataset.map(
+     apply_chat_template,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+     desc="Applying chat template to test_sft",
+ )
+
+
+ ###########
+ # Training
+ ###########
+ trainer = SFTTrainer(
+     model=model,
+     args=train_conf,
+     peft_config=peft_conf,
+     train_dataset=processed_train_dataset,
+     eval_dataset=processed_test_dataset,
+     max_seq_length=2048,
+     dataset_text_field="text",
+     tokenizer=tokenizer,
+     packing=True,
+ )
+ train_result = trainer.train()
+ metrics = train_result.metrics
+ trainer.log_metrics("train", metrics)
+ trainer.save_metrics("train", metrics)
+ trainer.save_state()
+
+
+ #############
+ # Evaluation
+ #############
+ tokenizer.padding_side = 'left'
+ metrics = trainer.evaluate()
+ metrics["eval_samples"] = len(processed_test_dataset)
+ trainer.log_metrics("eval", metrics)
+ trainer.save_metrics("eval", metrics)
+
+
+ #############
+ # Save model
+ #############
+ trainer.save_model(train_conf.output_dir)
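
For reference, trainer.save_model() here writes the LoRA adapter (rather than a fully merged model) to ./checkpoint_dir. The snippet below is a minimal, untested sketch of how that adapter could be loaded back for a quick generation check; it assumes the default output_dir above was kept and relies on peft's AutoPeftModelForCausalLM, so adjust the path, dtype, and device settings to your own setup.

import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_dir = "./checkpoint_dir"  # output_dir used by the training run above (assumed)

# Load the base model plus the saved LoRA adapter in one call.
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_dir,
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")

# Build a chat-formatted prompt and generate a short reply as a smoke test.
messages = [{"role": "user", "content": "Summarize what LoRA finetuning does in one sentence."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

with torch.no_grad():
    output_ids = model.generate(input_ids, max_new_tokens=128)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))

If a single merged checkpoint is preferred for deployment, the loaded model's merge_and_unload() method can be applied before saving.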