jihong008 committed
Commit 6806335 · verified · 1 Parent(s): a062bc6

Training in progress, step 28

adapter_config.json CHANGED
@@ -4,7 +4,7 @@
     "base_model_class": "MusicgenMelodyForConditionalGeneration",
     "parent_library": "transformers.models.musicgen_melody.modeling_musicgen_melody"
   },
-  "base_model_name_or_path": "facebook/musicgen-melody",
+  "base_model_name_or_path": "/data/data3/hangzeli/work_lab/workspace/multimodal/huggingface_model/musicgen-melody",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -13,32 +13,32 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 8,
+  "lora_alpha": 16,
   "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 8,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "fc2",
-    "lm_heads.3",
-    "lm_heads.2",
-    "q_proj",
-    "embed_tokens.3",
     "lm_heads.0",
+    "v_proj",
+    "enc_to_dec_proj",
+    "embed_tokens.1",
     "audio_enc_to_dec_proj",
     "fc1",
-    "enc_to_dec_proj",
-    "embed_tokens.2",
+    "fc2",
+    "q_proj",
     "k_proj",
-    "embed_tokens.0",
-    "out_proj",
     "lm_heads.1",
-    "embed_tokens.1",
-    "v_proj"
+    "lm_heads.2",
+    "lm_heads.3",
+    "out_proj",
+    "embed_tokens.2",
+    "embed_tokens.3",
+    "embed_tokens.0"
   ],
   "task_type": null,
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e9d3d4aeaad756a5f6fe85132368a98a076d7c00abd1c5b2211ccc997f6aa91d
-size 43594480
+oid sha256:edbe6968ce91e71f4bba96413aaec339c38f9d2726f1b5bdcda8c0c8a61744d9
+size 87103456
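The adapter checkpoint roughly doubles in size (43,594,480 to 87,103,456 bytes), which is consistent with the LoRA rank going from 8 to 16: for each wrapped linear layer, LoRA adds r * (in_features + out_features) parameters, so the adapter grows linearly with r. A back-of-the-envelope check, with a hypothetical layer shape chosen only for illustration:

# Rough illustration (layer shapes are assumptions, not the real model's):
# LoRA adds two low-rank matrices, A (r x in) and B (out x r), per wrapped layer.
def lora_params(in_features: int, out_features: int, r: int) -> int:
    return r * (in_features + out_features)

print(lora_params(1024, 1024, r=8))   # 16384 parameters
print(lora_params(1024, 1024, r=16))  # 32768 parameters -> 2x, matching the ~2x file size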
all_results.json CHANGED
@@ -1,8 +1,8 @@
 {
   "epoch": 3.6721311475409837,
-  "train_loss": 7.743570498057774,
-  "train_runtime": 121.4704,
+  "train_loss": 7.744901963642666,
+  "train_runtime": 121.5966,
   "train_samples": 122,
-  "train_samples_per_second": 4.017,
-  "train_steps_per_second": 0.231
+  "train_samples_per_second": 4.013,
+  "train_steps_per_second": 0.23
 }
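The reported throughput is internally consistent. The Trainer divides the number of samples scheduled for the full run (dataset size times configured epochs) and the number of optimizer steps by the wall-clock runtime; assuming 4 configured epochs (not stored in this file, but the value that matches the numbers), the figures reproduce as sketched below.

# Sanity check of the reported throughput (num_train_epochs=4 is an assumption).
train_samples = 122
num_train_epochs = 4        # assumed; consistent with the reported values
train_runtime = 121.5966
max_steps = 28

print(train_samples * num_train_epochs / train_runtime)  # ~4.013 samples/s
print(max_steps / train_runtime)                          # ~0.23 steps/s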
tokenizer.json CHANGED
@@ -1,7 +1,14 @@
 {
   "version": "1.0",
   "truncation": null,
-  "padding": null,
+  "padding": {
+    "strategy": "BatchLongest",
+    "direction": "Right",
+    "pad_to_multiple_of": null,
+    "pad_id": 0,
+    "pad_type_id": 0,
+    "pad_token": "<pad>"
+  },
   "added_tokens": [
     {
       "id": 0,
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
   "epoch": 3.6721311475409837,
-  "train_loss": 7.743570498057774,
-  "train_runtime": 121.4704,
+  "train_loss": 7.744901963642666,
+  "train_runtime": 121.5966,
   "train_samples": 122,
-  "train_samples_per_second": 4.017,
-  "train_steps_per_second": 0.231
+  "train_samples_per_second": 4.013,
+  "train_steps_per_second": 0.23
 }
trainer_state.json CHANGED
@@ -10,110 +10,110 @@
   "log_history": [
     {
       "epoch": 0.26229508196721313,
-      "grad_norm": 1.0287491083145142,
+      "grad_norm": 0.9730982184410095,
       "learning_rate": 0.00018571428571428572,
-      "loss": 9.4306,
+      "loss": 9.4352,
       "step": 2
     },
     {
       "epoch": 0.5245901639344263,
-      "grad_norm": 1.8649983406066895,
+      "grad_norm": 1.8006277084350586,
       "learning_rate": 0.00017142857142857143,
-      "loss": 9.1591,
+      "loss": 9.1689,
       "step": 4
     },
     {
       "epoch": 0.7868852459016393,
-      "grad_norm": 1.8567523956298828,
+      "grad_norm": 1.831032395362854,
       "learning_rate": 0.00015714285714285716,
-      "loss": 8.7072,
+      "loss": 8.7485,
       "step": 6
     },
     {
       "epoch": 1.0491803278688525,
-      "grad_norm": 2.180053234100342,
+      "grad_norm": 2.2559938430786133,
       "learning_rate": 0.00014285714285714287,
-      "loss": 8.2864,
+      "loss": 8.3076,
       "step": 8
     },
     {
       "epoch": 1.3114754098360657,
-      "grad_norm": 2.318220853805542,
+      "grad_norm": 2.454803943634033,
       "learning_rate": 0.00012857142857142858,
-      "loss": 7.8084,
+      "loss": 7.8092,
       "step": 10
     },
     {
       "epoch": 1.5737704918032787,
-      "grad_norm": 2.61151385307312,
+      "grad_norm": 2.537230968475342,
       "learning_rate": 0.00011428571428571428,
-      "loss": 7.8655,
+      "loss": 7.8537,
       "step": 12
     },
     {
       "epoch": 1.8360655737704918,
-      "grad_norm": 2.6175003051757812,
+      "grad_norm": 2.46220326423645,
       "learning_rate": 0.0001,
-      "loss": 7.6385,
+      "loss": 7.5995,
       "step": 14
     },
     {
       "epoch": 2.098360655737705,
-      "grad_norm": 1.4860339164733887,
+      "grad_norm": 1.449030876159668,
       "learning_rate": 8.571428571428571e-05,
-      "loss": 7.0809,
+      "loss": 7.0811,
       "step": 16
     },
     {
       "epoch": 2.360655737704918,
-      "grad_norm": 1.5985082387924194,
+      "grad_norm": 1.5821515321731567,
       "learning_rate": 7.142857142857143e-05,
-      "loss": 7.2482,
+      "loss": 7.2538,
       "step": 18
     },
     {
       "epoch": 2.6229508196721314,
-      "grad_norm": 1.956580638885498,
+      "grad_norm": 1.8766202926635742,
       "learning_rate": 5.714285714285714e-05,
-      "loss": 7.0691,
+      "loss": 7.068,
       "step": 20
     },
     {
       "epoch": 2.8852459016393444,
-      "grad_norm": 1.449498176574707,
+      "grad_norm": 1.4384005069732666,
       "learning_rate": 4.2857142857142856e-05,
-      "loss": 7.0927,
+      "loss": 7.0955,
       "step": 22
     },
     {
       "epoch": 3.1475409836065573,
-      "grad_norm": 1.9451191425323486,
+      "grad_norm": 1.8369501829147339,
       "learning_rate": 2.857142857142857e-05,
-      "loss": 6.8079,
+      "loss": 6.7993,
       "step": 24
     },
     {
       "epoch": 3.4098360655737707,
-      "grad_norm": 1.4124294519424438,
+      "grad_norm": 1.4276976585388184,
       "learning_rate": 1.4285714285714285e-05,
-      "loss": 7.0556,
+      "loss": 7.053,
       "step": 26
     },
     {
       "epoch": 3.6721311475409837,
-      "grad_norm": 1.6439225673675537,
+      "grad_norm": 1.627907156944275,
       "learning_rate": 0.0,
-      "loss": 7.1598,
+      "loss": 7.1552,
       "step": 28
     },
     {
       "epoch": 3.6721311475409837,
       "step": 28,
-      "total_flos": 131289516862176.0,
-      "train_loss": 7.743570498057774,
-      "train_runtime": 121.4704,
-      "train_samples_per_second": 4.017,
-      "train_steps_per_second": 0.231
+      "total_flos": 131381250733656.0,
+      "train_loss": 7.744901963642666,
+      "train_runtime": 121.5966,
+      "train_samples_per_second": 4.013,
+      "train_steps_per_second": 0.23
     }
   ],
   "logging_steps": 2,
@@ -133,7 +133,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 131289516862176.0,
+  "total_flos": 131381250733656.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a1637a4130ec7c578e47549d30e4fc16da4b978ca4e0d66b5de6e12e322f6ff7
+oid sha256:ecdfb3bc0586502d5167e8f227e1e825f93db0647c910a4157114e83c2f11d51
 size 5304