John6666 committed on
Commit 45b8b16
1 Parent(s): bffe899

Upload flux_clip_keys.json

Files changed (1)
  1. flux_clip_keys.json +197 -197
flux_clip_keys.json CHANGED
@@ -1,201 +1,201 @@
  [
- "embeddings.position_embedding.weight",
- "embeddings.token_embedding.weight",
- "encoder.layers.0.layer_norm1.bias",
- "encoder.layers.0.layer_norm1.weight",
- "encoder.layers.0.layer_norm2.bias",
- "encoder.layers.0.layer_norm2.weight",
- "encoder.layers.0.mlp.fc1.bias",
- "encoder.layers.0.mlp.fc1.weight",
- "encoder.layers.0.mlp.fc2.bias",
- "encoder.layers.0.mlp.fc2.weight",
- "encoder.layers.0.self_attn.k_proj.bias",
- "encoder.layers.0.self_attn.k_proj.weight",
- "encoder.layers.0.self_attn.out_proj.bias",
- "encoder.layers.0.self_attn.out_proj.weight",
- "encoder.layers.0.self_attn.q_proj.bias",
- "encoder.layers.0.self_attn.q_proj.weight",
- "encoder.layers.0.self_attn.v_proj.bias",
- "encoder.layers.0.self_attn.v_proj.weight",
- "encoder.layers.1.layer_norm1.bias",
- "encoder.layers.1.layer_norm1.weight",
- "encoder.layers.1.layer_norm2.bias",
- "encoder.layers.1.layer_norm2.weight",
- "encoder.layers.1.mlp.fc1.bias",
- "encoder.layers.1.mlp.fc1.weight",
- "encoder.layers.1.mlp.fc2.bias",
- "encoder.layers.1.mlp.fc2.weight",
- "encoder.layers.1.self_attn.k_proj.bias",
- "encoder.layers.1.self_attn.k_proj.weight",
- "encoder.layers.1.self_attn.out_proj.bias",
- "encoder.layers.1.self_attn.out_proj.weight",
- "encoder.layers.1.self_attn.q_proj.bias",
- "encoder.layers.1.self_attn.q_proj.weight",
- "encoder.layers.1.self_attn.v_proj.bias",
- "encoder.layers.1.self_attn.v_proj.weight",
- "encoder.layers.10.layer_norm1.bias",
- "encoder.layers.10.layer_norm1.weight",
- "encoder.layers.10.layer_norm2.bias",
- "encoder.layers.10.layer_norm2.weight",
- "encoder.layers.10.mlp.fc1.bias",
- "encoder.layers.10.mlp.fc1.weight",
- "encoder.layers.10.mlp.fc2.bias",
- "encoder.layers.10.mlp.fc2.weight",
- "encoder.layers.10.self_attn.k_proj.bias",
- "encoder.layers.10.self_attn.k_proj.weight",
- "encoder.layers.10.self_attn.out_proj.bias",
- "encoder.layers.10.self_attn.out_proj.weight",
- "encoder.layers.10.self_attn.q_proj.bias",
- "encoder.layers.10.self_attn.q_proj.weight",
- "encoder.layers.10.self_attn.v_proj.bias",
- "encoder.layers.10.self_attn.v_proj.weight",
- "encoder.layers.11.layer_norm1.bias",
- "encoder.layers.11.layer_norm1.weight",
- "encoder.layers.11.layer_norm2.bias",
- "encoder.layers.11.layer_norm2.weight",
- "encoder.layers.11.mlp.fc1.bias",
- "encoder.layers.11.mlp.fc1.weight",
- "encoder.layers.11.mlp.fc2.bias",
- "encoder.layers.11.mlp.fc2.weight",
- "encoder.layers.11.self_attn.k_proj.bias",
- "encoder.layers.11.self_attn.k_proj.weight",
- "encoder.layers.11.self_attn.out_proj.bias",
- "encoder.layers.11.self_attn.out_proj.weight",
- "encoder.layers.11.self_attn.q_proj.bias",
- "encoder.layers.11.self_attn.q_proj.weight",
- "encoder.layers.11.self_attn.v_proj.bias",
- "encoder.layers.11.self_attn.v_proj.weight",
- "encoder.layers.2.layer_norm1.bias",
- "encoder.layers.2.layer_norm1.weight",
- "encoder.layers.2.layer_norm2.bias",
- "encoder.layers.2.layer_norm2.weight",
- "encoder.layers.2.mlp.fc1.bias",
- "encoder.layers.2.mlp.fc1.weight",
- "encoder.layers.2.mlp.fc2.bias",
- "encoder.layers.2.mlp.fc2.weight",
- "encoder.layers.2.self_attn.k_proj.bias",
- "encoder.layers.2.self_attn.k_proj.weight",
- "encoder.layers.2.self_attn.out_proj.bias",
- "encoder.layers.2.self_attn.out_proj.weight",
- "encoder.layers.2.self_attn.q_proj.bias",
- "encoder.layers.2.self_attn.q_proj.weight",
- "encoder.layers.2.self_attn.v_proj.bias",
- "encoder.layers.2.self_attn.v_proj.weight",
- "encoder.layers.3.layer_norm1.bias",
- "encoder.layers.3.layer_norm1.weight",
- "encoder.layers.3.layer_norm2.bias",
- "encoder.layers.3.layer_norm2.weight",
- "encoder.layers.3.mlp.fc1.bias",
- "encoder.layers.3.mlp.fc1.weight",
- "encoder.layers.3.mlp.fc2.bias",
- "encoder.layers.3.mlp.fc2.weight",
- "encoder.layers.3.self_attn.k_proj.bias",
- "encoder.layers.3.self_attn.k_proj.weight",
- "encoder.layers.3.self_attn.out_proj.bias",
- "encoder.layers.3.self_attn.out_proj.weight",
- "encoder.layers.3.self_attn.q_proj.bias",
- "encoder.layers.3.self_attn.q_proj.weight",
- "encoder.layers.3.self_attn.v_proj.bias",
- "encoder.layers.3.self_attn.v_proj.weight",
- "encoder.layers.4.layer_norm1.bias",
- "encoder.layers.4.layer_norm1.weight",
- "encoder.layers.4.layer_norm2.bias",
- "encoder.layers.4.layer_norm2.weight",
- "encoder.layers.4.mlp.fc1.bias",
- "encoder.layers.4.mlp.fc1.weight",
- "encoder.layers.4.mlp.fc2.bias",
- "encoder.layers.4.mlp.fc2.weight",
- "encoder.layers.4.self_attn.k_proj.bias",
- "encoder.layers.4.self_attn.k_proj.weight",
- "encoder.layers.4.self_attn.out_proj.bias",
- "encoder.layers.4.self_attn.out_proj.weight",
- "encoder.layers.4.self_attn.q_proj.bias",
- "encoder.layers.4.self_attn.q_proj.weight",
- "encoder.layers.4.self_attn.v_proj.bias",
- "encoder.layers.4.self_attn.v_proj.weight",
- "encoder.layers.5.layer_norm1.bias",
- "encoder.layers.5.layer_norm1.weight",
- "encoder.layers.5.layer_norm2.bias",
- "encoder.layers.5.layer_norm2.weight",
- "encoder.layers.5.mlp.fc1.bias",
- "encoder.layers.5.mlp.fc1.weight",
- "encoder.layers.5.mlp.fc2.bias",
- "encoder.layers.5.mlp.fc2.weight",
- "encoder.layers.5.self_attn.k_proj.bias",
- "encoder.layers.5.self_attn.k_proj.weight",
- "encoder.layers.5.self_attn.out_proj.bias",
- "encoder.layers.5.self_attn.out_proj.weight",
- "encoder.layers.5.self_attn.q_proj.bias",
- "encoder.layers.5.self_attn.q_proj.weight",
- "encoder.layers.5.self_attn.v_proj.bias",
- "encoder.layers.5.self_attn.v_proj.weight",
- "encoder.layers.6.layer_norm1.bias",
- "encoder.layers.6.layer_norm1.weight",
- "encoder.layers.6.layer_norm2.bias",
- "encoder.layers.6.layer_norm2.weight",
- "encoder.layers.6.mlp.fc1.bias",
- "encoder.layers.6.mlp.fc1.weight",
- "encoder.layers.6.mlp.fc2.bias",
- "encoder.layers.6.mlp.fc2.weight",
- "encoder.layers.6.self_attn.k_proj.bias",
- "encoder.layers.6.self_attn.k_proj.weight",
- "encoder.layers.6.self_attn.out_proj.bias",
- "encoder.layers.6.self_attn.out_proj.weight",
- "encoder.layers.6.self_attn.q_proj.bias",
- "encoder.layers.6.self_attn.q_proj.weight",
- "encoder.layers.6.self_attn.v_proj.bias",
- "encoder.layers.6.self_attn.v_proj.weight",
- "encoder.layers.7.layer_norm1.bias",
- "encoder.layers.7.layer_norm1.weight",
- "encoder.layers.7.layer_norm2.bias",
- "encoder.layers.7.layer_norm2.weight",
- "encoder.layers.7.mlp.fc1.bias",
- "encoder.layers.7.mlp.fc1.weight",
- "encoder.layers.7.mlp.fc2.bias",
- "encoder.layers.7.mlp.fc2.weight",
- "encoder.layers.7.self_attn.k_proj.bias",
- "encoder.layers.7.self_attn.k_proj.weight",
- "encoder.layers.7.self_attn.out_proj.bias",
- "encoder.layers.7.self_attn.out_proj.weight",
- "encoder.layers.7.self_attn.q_proj.bias",
- "encoder.layers.7.self_attn.q_proj.weight",
- "encoder.layers.7.self_attn.v_proj.bias",
- "encoder.layers.7.self_attn.v_proj.weight",
- "encoder.layers.8.layer_norm1.bias",
- "encoder.layers.8.layer_norm1.weight",
- "encoder.layers.8.layer_norm2.bias",
- "encoder.layers.8.layer_norm2.weight",
- "encoder.layers.8.mlp.fc1.bias",
- "encoder.layers.8.mlp.fc1.weight",
- "encoder.layers.8.mlp.fc2.bias",
- "encoder.layers.8.mlp.fc2.weight",
- "encoder.layers.8.self_attn.k_proj.bias",
- "encoder.layers.8.self_attn.k_proj.weight",
- "encoder.layers.8.self_attn.out_proj.bias",
- "encoder.layers.8.self_attn.out_proj.weight",
- "encoder.layers.8.self_attn.q_proj.bias",
- "encoder.layers.8.self_attn.q_proj.weight",
- "encoder.layers.8.self_attn.v_proj.bias",
- "encoder.layers.8.self_attn.v_proj.weight",
- "encoder.layers.9.layer_norm1.bias",
- "encoder.layers.9.layer_norm1.weight",
- "encoder.layers.9.layer_norm2.bias",
- "encoder.layers.9.layer_norm2.weight",
- "encoder.layers.9.mlp.fc1.bias",
- "encoder.layers.9.mlp.fc1.weight",
- "encoder.layers.9.mlp.fc2.bias",
- "encoder.layers.9.mlp.fc2.weight",
- "encoder.layers.9.self_attn.k_proj.bias",
- "encoder.layers.9.self_attn.k_proj.weight",
- "encoder.layers.9.self_attn.out_proj.bias",
- "encoder.layers.9.self_attn.out_proj.weight",
- "encoder.layers.9.self_attn.q_proj.bias",
- "encoder.layers.9.self_attn.q_proj.weight",
- "encoder.layers.9.self_attn.v_proj.bias",
- "encoder.layers.9.self_attn.v_proj.weight",
- "final_layer_norm.bias",
- "final_layer_norm.weight",
- "text_encoders.clip_l.transformer.text_projection.weight",
+ "text_model.embeddings.position_embedding.weight",
+ "text_model.embeddings.token_embedding.weight",
+ "text_model.encoder.layers.0.layer_norm1.bias",
+ "text_model.encoder.layers.0.layer_norm1.weight",
+ "text_model.encoder.layers.0.layer_norm2.bias",
+ "text_model.encoder.layers.0.layer_norm2.weight",
+ "text_model.encoder.layers.0.mlp.fc1.bias",
+ "text_model.encoder.layers.0.mlp.fc1.weight",
+ "text_model.encoder.layers.0.mlp.fc2.bias",
+ "text_model.encoder.layers.0.mlp.fc2.weight",
+ "text_model.encoder.layers.0.self_attn.k_proj.bias",
+ "text_model.encoder.layers.0.self_attn.k_proj.weight",
+ "text_model.encoder.layers.0.self_attn.out_proj.bias",
+ "text_model.encoder.layers.0.self_attn.out_proj.weight",
+ "text_model.encoder.layers.0.self_attn.q_proj.bias",
+ "text_model.encoder.layers.0.self_attn.q_proj.weight",
+ "text_model.encoder.layers.0.self_attn.v_proj.bias",
+ "text_model.encoder.layers.0.self_attn.v_proj.weight",
+ "text_model.encoder.layers.1.layer_norm1.bias",
+ "text_model.encoder.layers.1.layer_norm1.weight",
+ "text_model.encoder.layers.1.layer_norm2.bias",
+ "text_model.encoder.layers.1.layer_norm2.weight",
+ "text_model.encoder.layers.1.mlp.fc1.bias",
+ "text_model.encoder.layers.1.mlp.fc1.weight",
+ "text_model.encoder.layers.1.mlp.fc2.bias",
+ "text_model.encoder.layers.1.mlp.fc2.weight",
+ "text_model.encoder.layers.1.self_attn.k_proj.bias",
+ "text_model.encoder.layers.1.self_attn.k_proj.weight",
+ "text_model.encoder.layers.1.self_attn.out_proj.bias",
+ "text_model.encoder.layers.1.self_attn.out_proj.weight",
+ "text_model.encoder.layers.1.self_attn.q_proj.bias",
+ "text_model.encoder.layers.1.self_attn.q_proj.weight",
+ "text_model.encoder.layers.1.self_attn.v_proj.bias",
+ "text_model.encoder.layers.1.self_attn.v_proj.weight",
+ "text_model.encoder.layers.10.layer_norm1.bias",
+ "text_model.encoder.layers.10.layer_norm1.weight",
+ "text_model.encoder.layers.10.layer_norm2.bias",
+ "text_model.encoder.layers.10.layer_norm2.weight",
+ "text_model.encoder.layers.10.mlp.fc1.bias",
+ "text_model.encoder.layers.10.mlp.fc1.weight",
+ "text_model.encoder.layers.10.mlp.fc2.bias",
+ "text_model.encoder.layers.10.mlp.fc2.weight",
+ "text_model.encoder.layers.10.self_attn.k_proj.bias",
+ "text_model.encoder.layers.10.self_attn.k_proj.weight",
+ "text_model.encoder.layers.10.self_attn.out_proj.bias",
+ "text_model.encoder.layers.10.self_attn.out_proj.weight",
+ "text_model.encoder.layers.10.self_attn.q_proj.bias",
+ "text_model.encoder.layers.10.self_attn.q_proj.weight",
+ "text_model.encoder.layers.10.self_attn.v_proj.bias",
+ "text_model.encoder.layers.10.self_attn.v_proj.weight",
+ "text_model.encoder.layers.11.layer_norm1.bias",
+ "text_model.encoder.layers.11.layer_norm1.weight",
+ "text_model.encoder.layers.11.layer_norm2.bias",
+ "text_model.encoder.layers.11.layer_norm2.weight",
+ "text_model.encoder.layers.11.mlp.fc1.bias",
+ "text_model.encoder.layers.11.mlp.fc1.weight",
+ "text_model.encoder.layers.11.mlp.fc2.bias",
+ "text_model.encoder.layers.11.mlp.fc2.weight",
+ "text_model.encoder.layers.11.self_attn.k_proj.bias",
+ "text_model.encoder.layers.11.self_attn.k_proj.weight",
+ "text_model.encoder.layers.11.self_attn.out_proj.bias",
+ "text_model.encoder.layers.11.self_attn.out_proj.weight",
+ "text_model.encoder.layers.11.self_attn.q_proj.bias",
+ "text_model.encoder.layers.11.self_attn.q_proj.weight",
+ "text_model.encoder.layers.11.self_attn.v_proj.bias",
+ "text_model.encoder.layers.11.self_attn.v_proj.weight",
+ "text_model.encoder.layers.2.layer_norm1.bias",
+ "text_model.encoder.layers.2.layer_norm1.weight",
+ "text_model.encoder.layers.2.layer_norm2.bias",
+ "text_model.encoder.layers.2.layer_norm2.weight",
+ "text_model.encoder.layers.2.mlp.fc1.bias",
+ "text_model.encoder.layers.2.mlp.fc1.weight",
+ "text_model.encoder.layers.2.mlp.fc2.bias",
+ "text_model.encoder.layers.2.mlp.fc2.weight",
+ "text_model.encoder.layers.2.self_attn.k_proj.bias",
+ "text_model.encoder.layers.2.self_attn.k_proj.weight",
+ "text_model.encoder.layers.2.self_attn.out_proj.bias",
+ "text_model.encoder.layers.2.self_attn.out_proj.weight",
+ "text_model.encoder.layers.2.self_attn.q_proj.bias",
+ "text_model.encoder.layers.2.self_attn.q_proj.weight",
+ "text_model.encoder.layers.2.self_attn.v_proj.bias",
+ "text_model.encoder.layers.2.self_attn.v_proj.weight",
+ "text_model.encoder.layers.3.layer_norm1.bias",
+ "text_model.encoder.layers.3.layer_norm1.weight",
+ "text_model.encoder.layers.3.layer_norm2.bias",
+ "text_model.encoder.layers.3.layer_norm2.weight",
+ "text_model.encoder.layers.3.mlp.fc1.bias",
+ "text_model.encoder.layers.3.mlp.fc1.weight",
+ "text_model.encoder.layers.3.mlp.fc2.bias",
+ "text_model.encoder.layers.3.mlp.fc2.weight",
+ "text_model.encoder.layers.3.self_attn.k_proj.bias",
+ "text_model.encoder.layers.3.self_attn.k_proj.weight",
+ "text_model.encoder.layers.3.self_attn.out_proj.bias",
+ "text_model.encoder.layers.3.self_attn.out_proj.weight",
+ "text_model.encoder.layers.3.self_attn.q_proj.bias",
+ "text_model.encoder.layers.3.self_attn.q_proj.weight",
+ "text_model.encoder.layers.3.self_attn.v_proj.bias",
+ "text_model.encoder.layers.3.self_attn.v_proj.weight",
+ "text_model.encoder.layers.4.layer_norm1.bias",
+ "text_model.encoder.layers.4.layer_norm1.weight",
+ "text_model.encoder.layers.4.layer_norm2.bias",
+ "text_model.encoder.layers.4.layer_norm2.weight",
+ "text_model.encoder.layers.4.mlp.fc1.bias",
+ "text_model.encoder.layers.4.mlp.fc1.weight",
+ "text_model.encoder.layers.4.mlp.fc2.bias",
+ "text_model.encoder.layers.4.mlp.fc2.weight",
+ "text_model.encoder.layers.4.self_attn.k_proj.bias",
+ "text_model.encoder.layers.4.self_attn.k_proj.weight",
+ "text_model.encoder.layers.4.self_attn.out_proj.bias",
+ "text_model.encoder.layers.4.self_attn.out_proj.weight",
+ "text_model.encoder.layers.4.self_attn.q_proj.bias",
+ "text_model.encoder.layers.4.self_attn.q_proj.weight",
+ "text_model.encoder.layers.4.self_attn.v_proj.bias",
+ "text_model.encoder.layers.4.self_attn.v_proj.weight",
+ "text_model.encoder.layers.5.layer_norm1.bias",
+ "text_model.encoder.layers.5.layer_norm1.weight",
+ "text_model.encoder.layers.5.layer_norm2.bias",
+ "text_model.encoder.layers.5.layer_norm2.weight",
+ "text_model.encoder.layers.5.mlp.fc1.bias",
+ "text_model.encoder.layers.5.mlp.fc1.weight",
+ "text_model.encoder.layers.5.mlp.fc2.bias",
+ "text_model.encoder.layers.5.mlp.fc2.weight",
+ "text_model.encoder.layers.5.self_attn.k_proj.bias",
+ "text_model.encoder.layers.5.self_attn.k_proj.weight",
+ "text_model.encoder.layers.5.self_attn.out_proj.bias",
+ "text_model.encoder.layers.5.self_attn.out_proj.weight",
+ "text_model.encoder.layers.5.self_attn.q_proj.bias",
+ "text_model.encoder.layers.5.self_attn.q_proj.weight",
+ "text_model.encoder.layers.5.self_attn.v_proj.bias",
+ "text_model.encoder.layers.5.self_attn.v_proj.weight",
+ "text_model.encoder.layers.6.layer_norm1.bias",
+ "text_model.encoder.layers.6.layer_norm1.weight",
+ "text_model.encoder.layers.6.layer_norm2.bias",
+ "text_model.encoder.layers.6.layer_norm2.weight",
+ "text_model.encoder.layers.6.mlp.fc1.bias",
+ "text_model.encoder.layers.6.mlp.fc1.weight",
+ "text_model.encoder.layers.6.mlp.fc2.bias",
+ "text_model.encoder.layers.6.mlp.fc2.weight",
+ "text_model.encoder.layers.6.self_attn.k_proj.bias",
+ "text_model.encoder.layers.6.self_attn.k_proj.weight",
+ "text_model.encoder.layers.6.self_attn.out_proj.bias",
+ "text_model.encoder.layers.6.self_attn.out_proj.weight",
+ "text_model.encoder.layers.6.self_attn.q_proj.bias",
+ "text_model.encoder.layers.6.self_attn.q_proj.weight",
+ "text_model.encoder.layers.6.self_attn.v_proj.bias",
+ "text_model.encoder.layers.6.self_attn.v_proj.weight",
+ "text_model.encoder.layers.7.layer_norm1.bias",
+ "text_model.encoder.layers.7.layer_norm1.weight",
+ "text_model.encoder.layers.7.layer_norm2.bias",
+ "text_model.encoder.layers.7.layer_norm2.weight",
+ "text_model.encoder.layers.7.mlp.fc1.bias",
+ "text_model.encoder.layers.7.mlp.fc1.weight",
+ "text_model.encoder.layers.7.mlp.fc2.bias",
+ "text_model.encoder.layers.7.mlp.fc2.weight",
+ "text_model.encoder.layers.7.self_attn.k_proj.bias",
+ "text_model.encoder.layers.7.self_attn.k_proj.weight",
+ "text_model.encoder.layers.7.self_attn.out_proj.bias",
+ "text_model.encoder.layers.7.self_attn.out_proj.weight",
+ "text_model.encoder.layers.7.self_attn.q_proj.bias",
+ "text_model.encoder.layers.7.self_attn.q_proj.weight",
+ "text_model.encoder.layers.7.self_attn.v_proj.bias",
+ "text_model.encoder.layers.7.self_attn.v_proj.weight",
+ "text_model.encoder.layers.8.layer_norm1.bias",
+ "text_model.encoder.layers.8.layer_norm1.weight",
+ "text_model.encoder.layers.8.layer_norm2.bias",
+ "text_model.encoder.layers.8.layer_norm2.weight",
+ "text_model.encoder.layers.8.mlp.fc1.bias",
+ "text_model.encoder.layers.8.mlp.fc1.weight",
+ "text_model.encoder.layers.8.mlp.fc2.bias",
+ "text_model.encoder.layers.8.mlp.fc2.weight",
+ "text_model.encoder.layers.8.self_attn.k_proj.bias",
+ "text_model.encoder.layers.8.self_attn.k_proj.weight",
+ "text_model.encoder.layers.8.self_attn.out_proj.bias",
+ "text_model.encoder.layers.8.self_attn.out_proj.weight",
+ "text_model.encoder.layers.8.self_attn.q_proj.bias",
+ "text_model.encoder.layers.8.self_attn.q_proj.weight",
+ "text_model.encoder.layers.8.self_attn.v_proj.bias",
+ "text_model.encoder.layers.8.self_attn.v_proj.weight",
+ "text_model.encoder.layers.9.layer_norm1.bias",
+ "text_model.encoder.layers.9.layer_norm1.weight",
+ "text_model.encoder.layers.9.layer_norm2.bias",
+ "text_model.encoder.layers.9.layer_norm2.weight",
+ "text_model.encoder.layers.9.mlp.fc1.bias",
+ "text_model.encoder.layers.9.mlp.fc1.weight",
+ "text_model.encoder.layers.9.mlp.fc2.bias",
+ "text_model.encoder.layers.9.mlp.fc2.weight",
+ "text_model.encoder.layers.9.self_attn.k_proj.bias",
+ "text_model.encoder.layers.9.self_attn.k_proj.weight",
+ "text_model.encoder.layers.9.self_attn.out_proj.bias",
+ "text_model.encoder.layers.9.self_attn.out_proj.weight",
+ "text_model.encoder.layers.9.self_attn.q_proj.bias",
+ "text_model.encoder.layers.9.self_attn.q_proj.weight",
+ "text_model.encoder.layers.9.self_attn.v_proj.bias",
+ "text_model.encoder.layers.9.self_attn.v_proj.weight",
+ "text_model.final_layer_norm.bias",
+ "text_model.final_layer_norm.weight",
+ "text_projection.weight",
  "text_encoders.clip_l.transformer.text_model.embeddings.position_embedding.weight",
  "text_encoders.clip_l.transformer.text_model.embeddings.token_embedding.weight",
  "text_encoders.clip_l.transformer.text_model.encoder.layers.0.layer_norm1.bias",