kcsmta committed
Commit bb7e2be · verified · 1 parent: 07aedb2

Upload config.yaml with huggingface_hub
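The commit message suggests the file was pushed with huggingface_hub's upload_file helper, whose default commit message has exactly this form. A minimal sketch of such an upload (hypothetical reproduction; the repo_id is a placeholder, since the actual repository id is not shown on this page):

from huggingface_hub import upload_file

# Hypothetical reproduction of this commit. "<user>/<repo>" stands in for
# the actual repository id, which this page does not show.
upload_file(
    path_or_fileobj="config.yaml",
    path_in_repo="config.yaml",
    repo_id="<user>/<repo>",
)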

Files changed (1)
  1. config.yaml +42 -0
config.yaml ADDED
@@ -0,0 +1,42 @@
+exp_name: 'vi-en-fix-v1'
+
+# Training dataset (from the Hugging Face Hub)
+# data_source: "MedCat/MedCAT-SFT-v1"
+data_source: "MedCat/MedCAT-SFT-v1.1"
+
+# The base model (from the Hugging Face model hub)
+# model_name: "Qwen/Qwen2.5-0.5B"
+model_name: "MedCat/MedCAT-PT-Qwen2.5-0.5B-v1-stream-data-v1-checkpoint-600000"
+# model_name: "MedCat/MedCAT-PT-Apollo-0.5B-v1-stream-data-v1-checkpoint-600000"
+
+# Tokenizer
+tokenizer_batch_size: 1_000
+max_length: 512
+
+# Checkpoint configuration
+output_folder: "./checkpoints/MedCAT-SFT"  # Where to save checkpoints during training
+save_total_limit: 2                        # Maximum number of checkpoints to keep
+save_strategy: "steps"                     # Saving strategy (either 'steps' or 'epoch')
+save_steps: 500                            # Save the model every N steps
+
+# LoRA
+r: 8                    # Rank of the low-rank matrices
+lora_alpha: 32          # LoRA alpha (scaling factor)
+lora_dropout: 0.1       # Dropout rate
+bias: "none"            # Which biases to train ("none", "all", or "lora_only")
+task_type: "CAUSAL_LM"  # Task type: causal language modeling
+
+# Logging configuration
+logging_dir: "./logs/MedCAT-SFT"  # Directory for logs + base_model + data_version
+logging_steps: 100                # Logging frequency
+
+# Training configuration
+per_device_train_batch_size: 4  # Training batch size
+per_device_eval_batch_size: 4   # Evaluation batch size
+num_train_epochs: 2             # Number of epochs
+# max_steps: 500                # Total training steps (alternative to num_train_epochs)
+eval_steps: 500                 # Evaluation frequency (may differ from logging_steps, but keeping them equal is recommended)
+evaluation_strategy: "steps"    # Evaluation strategy (either 'steps' or 'epoch')
+seed: 3407                      # Random seed for reproducibility
+gradient_accumulation_steps: 8
+learning_rate: 0.00001
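For context, the keys above correspond one-for-one to arguments of peft.LoraConfig and transformers.TrainingArguments. Below is a minimal sketch of how a training script might consume this file; it is not the repository's actual code, and the tokenize helper, the "text" column, and the train/validation split names are assumptions about the dataset:

import yaml
from datasets import load_dataset
from peft import LoraConfig, get_peft_model
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    Trainer,
    TrainingArguments,
)

with open("config.yaml") as f:
    cfg = yaml.safe_load(f)  # PyYAML follows YAML 1.1, so 1_000 parses as the int 1000

dataset = load_dataset(cfg["data_source"])
tokenizer = AutoTokenizer.from_pretrained(cfg["model_name"])
model = AutoModelForCausalLM.from_pretrained(cfg["model_name"])

# Wrap the base model with LoRA adapters built from the config's LoRA block.
model = get_peft_model(
    model,
    LoraConfig(
        r=cfg["r"],
        lora_alpha=cfg["lora_alpha"],
        lora_dropout=cfg["lora_dropout"],
        bias=cfg["bias"],
        task_type=cfg["task_type"],
    ),
)

def tokenize(batch):
    # The "text" column is an assumption about the dataset schema.
    return tokenizer(batch["text"], truncation=True, max_length=cfg["max_length"])

tokenized = dataset.map(
    tokenize,
    batched=True,
    batch_size=cfg["tokenizer_batch_size"],
    remove_columns=dataset["train"].column_names,  # drop raw columns before padding
)

trainer = Trainer(
    model=model,
    args=TrainingArguments(
        output_dir=cfg["output_folder"],
        run_name=cfg["exp_name"],
        save_total_limit=cfg["save_total_limit"],
        save_strategy=cfg["save_strategy"],
        save_steps=cfg["save_steps"],
        logging_dir=cfg["logging_dir"],
        logging_steps=cfg["logging_steps"],
        per_device_train_batch_size=cfg["per_device_train_batch_size"],
        per_device_eval_batch_size=cfg["per_device_eval_batch_size"],
        num_train_epochs=cfg["num_train_epochs"],
        eval_steps=cfg["eval_steps"],
        evaluation_strategy=cfg["evaluation_strategy"],  # renamed to eval_strategy in newer transformers
        seed=cfg["seed"],
        gradient_accumulation_steps=cfg["gradient_accumulation_steps"],
        learning_rate=cfg["learning_rate"],
    ),
    # mlm=False makes the collator pad batches and derive causal-LM labels.
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm=False),
    train_dataset=tokenized["train"],       # split names are assumptions
    eval_dataset=tokenized["validation"],
)
trainer.train()

With these values, the effective training batch size is per_device_train_batch_size × gradient_accumulation_steps = 4 × 8 = 32 sequences per device, and evaluation and checkpointing both fire every 500 steps.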