
Train

Environment

cd scripts
python -m venv venv
source venv/bin/activate
pip install -U -r requirements.in

Train Tokenizer

time python -B train_tokenizer.py
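
The train_tokenizer.py script itself is not reproduced here. As a rough orientation, a from-scratch byte-level BPE tokenizer with a roughly 128k vocabulary can be trained with the Hugging Face tokenizers library along these lines; the corpus path, special tokens, and exact vocabulary size are placeholders, not the script's actual settings:

# A hypothetical sketch of tokenizer training, not the actual train_tokenizer.py.
from tokenizers import Tokenizer, decoders, models, pre_tokenizers, trainers

# Byte-level BPE trained from scratch on a plain-text corpus.
tokenizer = Tokenizer(models.BPE())
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=False)
tokenizer.decoder = decoders.ByteLevel()

trainer = trainers.BpeTrainer(
    vocab_size=131072,                                      # "128k" vocabulary; the exact size is an assumption
    special_tokens=['<unk>', '<s>', '</s>'],                # placeholder special tokens
    initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)

tokenizer.train(files=['corpus.txt'], trainer=trainer)      # placeholder corpus path
tokenizer.save('tokenizer.json')                            # placeholder output path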

Pretrain

python -B prepare_pretrain_datasets.py
CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config pretrain-model-0.yaml
litgpt convert_pretrained_checkpoint ../out/pretrain-0/final/ ../out/pretrain-0-final-checkpoint

CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config pretrain-model-1.yaml
litgpt convert_pretrained_checkpoint ../out/pretrain-1/final/ ../out/pretrain-1-final-checkpoint

CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config pretrain-model-2.yaml
litgpt convert_pretrained_checkpoint ../out/pretrain-2/final/ ../out/pretrain-2-final-checkpoint

CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config pretrain-model-3.yaml
litgpt convert_pretrained_checkpoint ../out/pretrain-3/final/ ../out/pretrain-3-final-checkpoint

CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config pretrain-model-4.yaml
litgpt convert_pretrained_checkpoint ../out/pretrain-4/final/ ../out/pretrain-4-final-checkpoint

# NOTE: unused
#   CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config pretrain-model-5.yaml
#   litgpt convert_pretrained_checkpoint ../out/pretrain-5/final/ ../out/pretrain-5-final-checkpoint
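
The prepare_pretrain_datasets.py script run at the start of this section is not reproduced here. The sketch below follows the litdata-based preparation pattern that litgpt documents for pretraining data; the dataset source, output directory, chunk size, and tokenizer path are placeholders rather than the script's real values:

# A hypothetical sketch of pretraining-data preparation, not the actual script.
from functools import partial

from datasets import load_dataset           # assumption: source corpora come from Hugging Face datasets
from litdata import optimize, TokensLoader  # litdata writes the streaming format litgpt's pretrain loader reads
from litgpt.tokenizer import Tokenizer


def tokenize_fn(index: int, dataset, tokenizer: Tokenizer):
    # Tokenize one document and yield a flat tensor of token ids,
    # terminated with the end-of-sequence token.
    yield tokenizer.encode(dataset[index]['text'], eos=True)


if __name__ == '__main__':
    dataset = load_dataset('placeholder/dataset', split='train')  # placeholder dataset id
    tokenizer = Tokenizer('..')                                   # directory holding the trained tokenizer files

    optimize(
        fn=partial(tokenize_fn, dataset=dataset, tokenizer=tokenizer),
        inputs=list(range(len(dataset))),
        output_dir='../pretrain-data',      # placeholder; must match the data path in the pretrain-model-*.yaml configs
        chunk_size=(2049 * 8192),           # tokens per on-disk chunk (placeholder value)
        num_workers=8,
        item_loader=TokensLoader(),         # store raw token streams for the pretraining loader
    )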

Continued Pretraining

python -B prepare_contrain_datasets.py
CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config contrain-model-0.yaml
litgpt convert_pretrained_checkpoint ../out/contrain-0/final/ ../out/contrain-0-final-checkpoint

CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt pretrain --config contrain-model-1.yaml
litgpt convert_pretrained_checkpoint ../out/contrain-1/final/ ../out/contrain-1-final-checkpoint

Chat with Pretrained Model

CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt chat out/pretrain-0/final/
CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt chat out/pretrain-1/final/
CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt chat out/pretrain-2/final/
CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt chat out/pretrain-3/final/
CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt chat out/pretrain-4/final/
# CUDA_VISIBLE_DEVICES=0 CUDA_LAUNCH_BLOCKING=0 PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True litgpt chat out/pretrain-5/final/
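
As an alternative to the interactive litgpt chat sessions above, a checkpoint can also be queried programmatically through litgpt's Python API; the prompt and sampling values below are placeholders:

# Quick programmatic check of a pretrained checkpoint via litgpt's Python API.
from litgpt import LLM

llm = LLM.load('out/pretrain-0/final/')   # same checkpoint directory used by `litgpt chat`
text = llm.generate('Once upon a time', max_new_tokens=64, temperature=0.8)
print(text)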

Model

Pretraining

litgpt pretrain --config ./pretrain-model.yaml
litgpt convert_from_litgpt out/pretrain/final/ out/converted_pretrain
cp config.json out/pretrain/final/
cp config.json out/converted_pretrain/

Export the converted checkpoint to safetensors:

import torch
from safetensors.torch import save_file

# Load the converted state dict on CPU and re-save it in safetensors format.
state_dict = torch.load('out/converted_pretrain/model.pth', map_location='cpu')
save_file(state_dict, 'out/converted_pretrain/model.safetensors')
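
An optional sanity check (not part of the original steps) is to load the safetensors file back and confirm it matches the PyTorch state dict:

import torch
from safetensors.torch import load_file

# Compare the exported safetensors tensors against the original checkpoint.
original = torch.load('out/converted_pretrain/model.pth', map_location='cpu')
exported = load_file('out/converted_pretrain/model.safetensors')

assert original.keys() == exported.keys()
assert all(torch.equal(original[k], exported[k]) for k in original)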

Continued Pretraining

litgpt convert_pretrained_checkpoint out/pretrain/final/ out/pretrain_checkpoint/final/
cp config.json out/pretrain_checkpoint/final/

litgpt pretrain --config ./contrain-model.yaml
litgpt convert_from_litgpt out/contrain/final/ out/converted_contrain
cp config.json out/converted_contrain/

Export the converted checkpoint to safetensors:

import torch
from safetensors.torch import save_file

# Load the converted state dict on CPU and re-save it in safetensors format.
state_dict = torch.load('out/converted_contrain/model.pth', map_location='cpu')
save_file(state_dict, 'out/converted_contrain/model.safetensors')

Copy the exported weights to the repository root:

cp out/converted_contrain/model.pth ./
cp out/converted_contrain/model.safetensors ./
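
Since out/converted_contrain/ now holds both config.json and model.safetensors, the export can be smoke-tested with transformers; this assumes config.json describes an architecture that transformers supports out of the box:

# Smoke test of the exported checkpoint; assumes config.json maps to a
# transformers-supported architecture.
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained('out/converted_contrain')
print(f'{sum(p.numel() for p in model.parameters()):,} parameters')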