configs.yaml: uploaded with huggingface_hub (commit e2514eb)
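# Full-parameter SFT configuration (LLaMA-Factory-style keys, listed alphabetically).
# Conversations are parsed in the ShareGPT layout: each turn carries a `from` field
# (human = user, gpt = assistant) and a `value` field, mapped through the
# role_tag / user_tag / assistant_tag / content_tag keys below.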
assistant_tag: gpt
bf16: true
content_tag: value
cutoff_len: 16384
dataset: /leonardo_work/EUHPC_E03_068/eguha/datasets_cache/mlfoundations-dev/instruction_filtering_random_code
dataset_dir: ONLINE
ddp_timeout: 180000000
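# DeepSpeed config path (relative to the training repo); the filename suggests ZeRO stage 3 with offload.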
deepspeed: dcft/train/zero3_offload.json
do_train: true
enable_liger_kernel: true
finetuning_type: full
formatting: sharegpt
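# Batch sizing: 96 global = 1 per device x 3 accumulation steps x 32 data-parallel ranks,
# assuming global_batch_size here means per_device x grad_accum x world_size.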
global_batch_size: 96
gradient_accumulation_steps: 3
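# Hub repo id for this run; push_to_hub is false below, so nothing is uploaded automatically.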
hub_model_id: mlfoundations-dev/DCFT-instruction_filtering_random_code-etash
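# Extra hyperparameter preset pulled in from the DCFT training repo (appears to be a
# DCFT-specific key rather than a stock LLaMA-Factory one).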
include_hp: dcft/train/hp_settings/reasoning.yaml
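# Optimizer schedule: peak LR 1e-5 with cosine decay, 10% warmup (warmup_ratio below), 3 epochs.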
learning_rate: 1.0e-05
logging_steps: 1
lr_scheduler_type: cosine
max_samples: 1000000
messages: conversations
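# Base model: local snapshot of Qwen/Qwen2.5-7B-Instruct from the shared models cache.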
model_name_or_path: /leonardo_work/EUHPC_E03_068/eguha/models_cache/Qwen/Qwen2.5-7B-Instruct
num_train_epochs: 3.0
output_dir: /leonardo_work/EUHPC_E03_068/eguha/checkoints/instruction_filtering_random_code
overwrite_cache: true
per_device_train_batch_size: 1
plot_loss: true
preprocessing_num_workers: 16
push_to_db: false
push_to_hub: false
report_to: wandb
role_tag: from
run_name: DCFT-instruction_filtering_random_code-etash
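# Checkpoint once per epoch; supervised fine-tuning stage using the Qwen2.5 chat template.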
save_strategy: epoch
stage: sft
template: qwen25
user_tag: human
warmup_ratio: 0.1