mtasic85 committed
Commit f5110dc
Parent: 369804b

train model

Files changed (2):
  1. scripts/TRAIN.md +1 -1
  2. scripts/model.yaml +8 -3
scripts/TRAIN.md CHANGED
@@ -36,5 +36,5 @@ pip install -U -r requirements-lit.in
 ```
 
 ```bash
-litgpt pretrain --data LitData --data.data_path "../data/" --config ./model.yaml
+litgpt pretrain --config ./model.yaml
 ```
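The `--data LitData --data.data_path "../data/"` flags dropped from the command are not gone; they move into `model.yaml` (next file). A minimal sketch, assuming PyYAML is available and `model.yaml` is in the working directory, of how the former CLI flags line up with the new config keys:

```python
# Sketch: check that the flags removed from the CLI now live under the
# `data:` key in model.yaml. Assumes PyYAML; paths come from this commit.
import yaml

with open("model.yaml") as f:
    cfg = yaml.safe_load(f)

# Old: litgpt pretrain --data LitData --data.data_path "../data/"
# New: the same settings, read from the config file.
assert cfg["data"]["class_path"] == "LitData"
assert cfg["data"]["init_args"]["data_path"] == "../data/"
```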
scripts/model.yaml CHANGED
@@ -42,7 +42,12 @@ initial_checkpoint_dir:
 resume: "auto"
 
 # Data-related arguments. If not provided, the default is ``litgpt.data.TinyLlama``.
-# data: LitData
+data:
+  class_path: LitData
+  init_args:
+    data_path: "../data/"
+    # num_workers: 16
+    num_workers: 3
 
 # Training-related arguments. See ``litgpt.args.TrainArgs`` for details
 train:
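At run time, litgpt's jsonargparse-style config loading turns the `class_path`/`init_args` block into a data-module constructor call. A minimal sketch of the equivalent Python, assuming the `LitData` shorthand resolves to `litgpt.data.LitData` and that `num_workers` is its DataLoader worker count:

```python
# Sketch: what the new `data:` block resolves to at run time, assuming
# class_path "LitData" maps to litgpt.data.LitData.
from litgpt.data import LitData

data = LitData(
    data_path="../data/",  # directory with the prepared LitData dataset
    num_workers=3,         # lowered from the commented-out 16
)
```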
@@ -56,8 +61,8 @@ train:
   global_batch_size: 512
 
   # Number of samples per data-parallel rank (type: int, default: 4)
-  micro_batch_size: 16
-  # micro_batch_size: 15
+  # micro_batch_size: 16
+  micro_batch_size: 15
 
   # Number of iterations with learning rate warmup active (type: int, default: 2000)
   lr_warmup_steps: 2000
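The swap from 16 to 15 samples per rank interacts with `global_batch_size` through gradient accumulation. A minimal sketch of the arithmetic, assuming litgpt derives the per-rank batch from `global_batch_size // devices` and accumulates `batch_size // micro_batch_size` micro-batches per optimizer step; the device count below is hypothetical, not part of this commit:

```python
# Sketch of the batch-size arithmetic behind TrainArgs; the rounding
# behavior here is an assumption, not a quote of litgpt internals.
global_batch_size = 512
micro_batch_size = 15   # this commit; was 16
devices = 8             # hypothetical GPU count

batch_size = global_batch_size // devices          # 64 samples per rank
grad_accum_iters = batch_size // micro_batch_size  # 4 with 15, also 4 with 16
print(batch_size, grad_accum_iters)
```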