# Llama-3.1-8B-BookAdventures / training_args.yaml
bf16: true                        # train in bfloat16 mixed precision
cutoff_len: 32768                 # maximum sequence length in tokens
dataset: floydpaca                # dataset name as registered under dataset_dir
dataset_dir: data
ddp_timeout: 180000000            # distributed timeout in seconds (effectively disabled)
deepspeed: cache/ds_z3_config.json  # DeepSpeed ZeRO stage-3 config (see sketch below)
do_train: true
finetuning_type: full             # full-parameter fine-tuning (no adapters/LoRA)
flash_attn: fa2                   # use FlashAttention-2
gradient_accumulation_steps: 4
include_num_input_tokens_seen: true  # log the cumulative number of input tokens seen
learning_rate: 2.0e-06
logging_steps: 1                  # log every optimizer step
lr_scheduler_type: cosine
max_grad_norm: 1.0                # gradient clipping threshold
max_samples: 10000000             # per-dataset sample cap (set high to use everything)
model_name_or_path: KoboldAI/L3.1-bookgen-infinity3m-bookgen-32k-fft
neat_packing: true                # pack sequences without cross-sample attention contamination
num_train_epochs: 1.0
optim: paged_adamw_8bit           # 8-bit paged AdamW (bitsandbytes)
output_dir: saves/LLaMA3.1-8B/full/L3.1-bookgen-infinity3m-bookgen-floydpaca-32k-fft-alpaca3
packing: true                     # pack multiple samples into each cutoff_len window
per_device_train_batch_size: 1
plot_loss: true                   # save a training-loss curve alongside checkpoints
preprocessing_num_workers: 16
report_to: none                   # disable external experiment tracking (W&B etc.)
save_steps: 25                    # checkpoint every 25 optimizer steps
stage: sft                        # supervised fine-tuning
template: alpaca                  # prompt template applied to each sample
train_on_prompt: true             # compute loss on prompt tokens as well as responses
warmup_steps: 0                   # no learning-rate warmup
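
# ---------------------------------------------------------------------
# Usage sketch (not part of the original file). The key names above match
# LLaMA-Factory's SFT arguments, so a run like this would typically be
# launched with its CLI:
#
#   llamafactory-cli train training_args.yaml
#
# or, forcing torchrun for a multi-GPU run with the DeepSpeed config:
#
#   FORCE_TORCHRUN=1 llamafactory-cli train training_args.yaml
#
# Effective batch size = per_device_train_batch_size (1)
#   * gradient_accumulation_steps (4) * number of GPUs.
# ---------------------------------------------------------------------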
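
# The referenced cache/ds_z3_config.json is not included here. A minimal
# ZeRO-3 config compatible with these settings might look like the
# following (hypothetical contents, modeled on LLaMA-Factory's ds_z3
# example; the "auto" fields are resolved by the HF Trainer at runtime):
#
#   {
#     "train_batch_size": "auto",
#     "train_micro_batch_size_per_gpu": "auto",
#     "gradient_accumulation_steps": "auto",
#     "gradient_clipping": "auto",
#     "bf16": { "enabled": "auto" },
#     "zero_optimization": {
#       "stage": 3,
#       "overlap_comm": true,
#       "contiguous_gradients": true,
#       "stage3_gather_16bit_weights_on_model_save": true
#     }
#   }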