# Train

## Tokenizer

```bash
cd scripts
python -m venv venv
source venv/bin/activate
pip install -U -r requirements.in
```

```bash
python -B train_tokenizer.py
```

## Dataset

```bash
cd scripts
python -m venv venv-lit
source venv-lit/bin/activate
pip install -U -r requirements-lit.in
```

```bash
python -B prepare_pretrain_dataset.py
```

## Model

```bash
cd scripts
python -m venv venv-lit
source venv-lit/bin/activate
pip install -U -r requirements-lit.in
```

```bash
litgpt pretrain --config ./model.yaml
```

```bash
litgpt convert_from_litgpt out/pretrain/final/ out/converted_model
cp config.json out/pretrain/final/
cp config.json out/converted_model/
```

```python
import torch
from transformers import AutoModel

state_dict = torch.load('out/converted_model/model.pth')
model = AutoModel.from_pretrained(
    'TinyLlama/TinyLlama_v1.1',
    state_dict=state_dict,
    ignore_mismatched_sizes=True,
)
model.save_pretrained('out/converted_model/')
```

## Evaluate

```bash
# litgpt evaluate --tasks 'hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge' --batch_size 8 out/pretrain/final/
litgpt evaluate --tasks 'hellaswag,gsm8k,truthfulqa_mc2,mmlu,mmlu_pro,winogrande,arc_challenge,leaderboard,ifeval,mgsm_direct,mathqa,gpqa' --batch_size 8 out/pretrain/final/
```