{
  "lora_name": "TinyTR",
  "always_override": false,
  "save_steps": 350.0,
  "micro_batch_size": 6,
  "batch_size": 0,
  "epochs": 0.25,
  "learning_rate": "3e-4",
  "lr_scheduler_type": "linear",
  "lora_rank": 64,
  "lora_alpha": 128,
  "lora_dropout": 0.1,
  "cutoff_len": 768,
  "dataset": "turkish",
  "eval_dataset": "None",
  "format": "Chatml",
  "eval_steps": 0.0,
  "raw_text_file": "None",
  "higher_rank_limit": false,
  "warmup_steps": 120.0,
  "optimizer": "adamw_torch",
  "hard_cut_string": "\\n\\n\\n",
  "train_only_after": "",
  "stop_at_loss": 0,
  "add_eos_token": true,
  "min_chars": 0.0,
  "report_to": "None",
  "precize_slicing_overlap": true,
  "add_eos_token_type": "Every Block",
  "save_steps_under_loss": 0,
  "add_bos_token": true,
  "training_projection": "q-k-v-o",
  "sliding_window": false,
  "warmup_ratio": 0,
  "grad_accumulation": 2,
  "neft_noise_alpha": 6
}
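For reference, below is a minimal sketch of how these hyperparameters could map onto Hugging Face peft/transformers objects. It is an assumption-laden illustration, not the extension's own loading code: the output directory and the q_proj/k_proj/v_proj/o_proj module names (a common convention for Llama-style models, here matching "training_projection": "q-k-v-o") are placeholders.

# Hypothetical mapping of the config above onto peft/transformers.
# Anything marked "assumed" is illustrative, not taken from the config.
from peft import LoraConfig
from transformers import TrainingArguments

lora_config = LoraConfig(
    r=64,                      # "lora_rank"
    lora_alpha=128,            # "lora_alpha" (alpha = 2 * rank here)
    lora_dropout=0.1,          # "lora_dropout"
    # "training_projection": "q-k-v-o" -> attention projections;
    # module names assumed for a Llama-style architecture.
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)

# Effective batch size = micro_batch_size * grad_accumulation = 6 * 2 = 12.
training_args = TrainingArguments(
    output_dir="loras/TinyTR",        # assumed output path
    per_device_train_batch_size=6,    # "micro_batch_size"
    gradient_accumulation_steps=2,    # "grad_accumulation"
    learning_rate=3e-4,               # "learning_rate" (stored as a string)
    lr_scheduler_type="linear",       # "lr_scheduler_type"
    warmup_steps=120,                 # "warmup_steps"
    num_train_epochs=0.25,            # "epochs" (a quarter pass over the data)
    save_steps=350,                   # "save_steps"
    optim="adamw_torch",              # "optimizer"
    neftune_noise_alpha=6,            # "neft_noise_alpha" (NEFTune embedding noise)
    report_to="none",                 # "report_to": "None"
)

The remaining fields are not TrainingArguments concerns: "cutoff_len" (768) would be applied at tokenization time as the maximum sequence length, and keys like "hard_cut_string", "precize_slicing_overlap", and "add_eos_token_type" control how the extension slices raw text into training blocks.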