# Model arguments
model_name_or_path: sanchit-gandhi/Mistral-7B-v0.1-6-layer
model_revision: main
torch_dtype: bfloat16
use_flash_attention_2: true
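# Note: use_flash_attention_2 requires the flash-attn package and an
# Ampere-or-newer GPU; on unsupported hardware, drop this flag to fall back
# to the default attention implementation.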
# Data training arguments
chat_template: "{% for message in messages %}\n{% if message['role'] == 'user' %}\n{{ '<|user|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'system' %}\n{{ '<|system|>\n' + message['content'] + eos_token }}\n{% elif message['role'] == 'assistant' %}\n{{ '<|assistant|>\n' + message['content'] + eos_token }}\n{% endif %}\n{% if loop.last and add_generation_prompt %}\n{{ '<|assistant|>' }}\n{% endif %}\n{% endfor %}"
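# The template above is the Zephyr-style chat format. As a rough sketch
# (exact whitespace depends on Jinja rendering), a two-turn exchange with
# Mistral's eos_token (</s>) renders roughly as:
#   <|user|>
#   How do I make tea?</s>
#   <|assistant|>
#   Boil water, then steep the leaves.</s>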
dataset_mixer:
  HuggingFaceH4/ultrachat_200k: 1.0
dataset_splits:
- train_sft
- test_sft
preprocessing_num_workers: 12
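# dataset_mixer maps dataset names to sampling fractions: 1.0 uses all of
# ultrachat_200k, while a smaller value (e.g. 0.5) would, in the
# alignment-handbook recipe format this config follows, subsample the
# dataset accordingly. dataset_splits selects which splits to load.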
# SFT trainer config
bf16: true
do_eval: true
evaluation_strategy: epoch
gradient_accumulation_steps: 1
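# Gradient checkpointing trades compute for memory by recomputing forward
# activations during the backward pass. use_reentrant: false selects
# PyTorch's non-reentrant implementation, the variant recommended by the
# PyTorch docs.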
gradient_checkpointing: true
gradient_checkpointing_kwargs:
  use_reentrant: false
hub_strategy: every_save
learning_rate: 3.0e-04
log_level: info
logging_steps: 10
logging_strategy: steps
lr_scheduler_type: cosine
max_seq_length: 2048
max_steps: -1
num_train_epochs: 5
output_dir: ./
overwrite_output_dir: true
per_device_eval_batch_size: 32
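# Effective global batch size = per_device_train_batch_size
#   x gradient_accumulation_steps x number of GPUs.
# E.g. on 8 GPUs (an assumed setup; adjust for yours): 64 x 1 x 8 = 512.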
per_device_train_batch_size: 64
push_to_hub: true
remove_unused_columns: true
report_to:
- tensorboard
- wandb
save_strategy: epoch
save_total_limit: 1
seed: 42
warmup_ratio: 0.1
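# With max_steps: -1, the total step count is derived from num_train_epochs
# and the dataset size; warmup_ratio: 0.1 spends the first 10% of those
# steps on linear warmup before the cosine schedule decays the learning
# rate from its 3.0e-04 peak.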