
## Train

### Tokenizer

```bash
cd scripts
python -m venv venv
source venv/bin/activate
pip install -U -r requirements.in
python -B train_tokenizer.py
```
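For orientation, here is a minimal sketch of what a script like `train_tokenizer.py` might do with the Hugging Face `tokenizers` library. The corpus name, vocabulary size, and special tokens below are placeholders, not this repository's actual settings:

```python
# Hypothetical sketch of BPE tokenizer training; corpus, vocab size,
# and special tokens are assumptions, not this repo's configuration.
from datasets import load_dataset
from tokenizers import Tokenizer, models, pre_tokenizers, trainers

# Stream text from a placeholder corpus.
dataset = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')

def text_iterator(batch_size=1000):
    for i in range(0, len(dataset), batch_size):
        yield dataset[i:i + batch_size]['text']

# Byte-level BPE, similar to what many LLaMA-style models use.
tokenizer = Tokenizer(models.BPE(unk_token=None))
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)

trainer = trainers.BpeTrainer(
    vocab_size=32000,
    special_tokens=['<s>', '</s>', '<unk>'],
)
tokenizer.train_from_iterator(text_iterator(), trainer=trainer)
tokenizer.save('tokenizer.json')
```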

### Dataset

```bash
cd scripts
python -m venv venv-lit
source venv-lit/bin/activate
pip install -U -r requirements-lit.in
python -B prepare_pretrain_dataset.py
```
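As a rough illustration, a script like `prepare_pretrain_dataset.py` typically tokenizes the raw corpus and writes it in a form the pretraining loop can stream. The sketch below packs token ids into a flat binary file; this is an assumed approach for illustration only, since litgpt's own data pipeline uses its litdata chunk format, and the dataset name and paths are placeholders:

```python
# Hypothetical sketch of pretraining data preparation: tokenize a corpus and
# pack the token ids into a flat uint16 file. Dataset name and output path
# are placeholders; the actual script may use litgpt/litdata's own format.
import numpy as np
from datasets import load_dataset
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_file('tokenizer.json')
dataset = load_dataset('wikitext', 'wikitext-2-raw-v1', split='train')
eos_id = tokenizer.token_to_id('</s>')

ids = []
for record in dataset:
    text = record['text']
    if not text.strip():
        continue
    ids.extend(tokenizer.encode(text).ids)
    if eos_id is not None:
        ids.append(eos_id)  # separate documents with an end-of-text token

# uint16 is enough for a 32k vocabulary; use uint32 for larger vocabularies.
np.array(ids, dtype=np.uint16).tofile('pretrain_tokens.bin')
```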

### Model

```bash
cd scripts
python -m venv venv-lit
source venv-lit/bin/activate
pip install -U -r requirements-lit.in
litgpt pretrain --config ./model.yaml
litgpt convert_from_litgpt out/pretrain/final/ out/converted_model
cp config.json out/pretrain/final/
cp config.json out/converted_model/
```

Then load the converted weights and save them as a regular transformers checkpoint:

```python
import torch
from transformers import AutoModel

state_dict = torch.load('out/converted_model/model.pth')
model = AutoModel.from_pretrained('TinyLlama/TinyLlama_v1.1', state_dict=state_dict, ignore_mismatched_sizes=True)
model.save_pretrained('out/converted_model/')
```
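To sanity-check the converted checkpoint, something like the following loads it back and runs a single forward pass. It assumes the trained tokenizer files have also been copied into `out/converted_model/`; adjust the tokenizer path otherwise:

```python
# Quick smoke test of the converted checkpoint: load it back and run one
# forward pass. Assumes tokenizer files were also copied into
# out/converted_model/; adjust the tokenizer path if they live elsewhere.
import torch
from transformers import AutoModel, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('out/converted_model/')
model = AutoModel.from_pretrained('out/converted_model/')
model.eval()

inputs = tokenizer('Once upon a time', return_tensors='pt')
with torch.no_grad():
    outputs = model(**inputs)
print(outputs.last_hidden_state.shape)  # (batch, sequence, hidden_size)
```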

## Evaluate

```bash
# litgpt evaluate --tasks 'hellaswag,gsm8k,truthfulqa_mc2,mmlu,winogrande,arc_challenge' --batch_size 8 out/pretrain/final/

litgpt evaluate --tasks 'hellaswag,gsm8k,truthfulqa_mc2,mmlu,mmlu_pro,winogrande,arc_challenge,leaderboard,ifeval,mgsm_direct,mathqa,gpqa' --batch_size 8 out/pretrain/final/
```
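`litgpt evaluate` wraps EleutherAI's lm-evaluation-harness. A roughly equivalent run can also be launched from Python against the converted checkpoint, as sketched below; the task subset is illustrative, and it assumes `out/converted_model/` holds a complete causal-LM checkpoint (weights, config, and tokenizer):

```python
# Rough, hypothetical equivalent of the litgpt evaluate call, using
# lm-evaluation-harness directly; the task list here is a small subset and
# the checkpoint path assumes a complete HF causal-LM directory.
import lm_eval

results = lm_eval.simple_evaluate(
    model='hf',
    model_args='pretrained=out/converted_model/',
    tasks=['hellaswag', 'arc_challenge', 'winogrande'],
    batch_size=8,
)
for task, metrics in results['results'].items():
    print(task, metrics)
```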