jingyaogong committed on
Commit
5430157
1 Parent(s): f819c1d

Upload 9 files

LMConfig.py ADDED
@@ -0,0 +1,58 @@
+ from transformers import PretrainedConfig
+ from typing import List
+
+
+ class LMConfig(PretrainedConfig):
+     model_type = "minimind"
+
+     def __init__(
+             self,
+             dim: int = 768,
+             n_layers: int = 16,
+             n_heads: int = 16,
+             n_kv_heads: int = 8,
+             vocab_size: int = 6400,
+             hidden_dim: int = None,
+             multiple_of: int = 64,
+             norm_eps: float = 1e-5,
+             max_seq_len: int = 512,
+             dropout: float = 0.0,
+             flash_attn: bool = True,
+             ####################################################
+             # Here are the MoE-specific configurations.
+             # When use_moe is False, the following fields are ignored.
+             ####################################################
+             use_moe: bool = False,
+             num_experts_per_tok=2,
+             n_routed_experts=4,
+             n_shared_experts: bool = True,
+             scoring_func='softmax',
+             aux_loss_alpha=0.01,
+             seq_aux=True,
+             norm_topk_prob=True,
+             **kwargs,
+     ):
+         self.dim = dim
+         self.n_layers = n_layers
+         self.n_heads = n_heads
+         self.n_kv_heads = n_kv_heads
+         self.vocab_size = vocab_size
+         self.hidden_dim = hidden_dim
+         self.multiple_of = multiple_of
+         self.norm_eps = norm_eps
+         self.max_seq_len = max_seq_len
+         self.dropout = dropout
+         self.flash_attn = flash_attn
+         ####################################################
+         # Here are the MoE-specific configurations.
+         # When use_moe is False, the following fields are ignored.
+         ####################################################
+         self.use_moe = use_moe
+         self.num_experts_per_tok = num_experts_per_tok  # number of experts selected per token
+         self.n_routed_experts = n_routed_experts  # total number of routed experts
+         self.n_shared_experts = n_shared_experts  # shared experts
+         self.scoring_func = scoring_func  # scoring function, defaults to 'softmax'
+         self.aux_loss_alpha = aux_loss_alpha  # alpha coefficient of the auxiliary loss
+         self.seq_aux = seq_aux  # whether to compute the auxiliary loss at the sequence level
+         self.norm_topk_prob = norm_topk_prob  # whether to normalize the top-k probabilities
+         super().__init__(**kwargs)
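
For reference, a minimal sketch of constructing this config directly, assuming LMConfig.py is importable from the working directory (all values shown are the defaults defined above):

    from LMConfig import LMConfig

    # Dense (non-MoE) configuration; the MoE fields are ignored while use_moe stays False.
    config = LMConfig(dim=768, n_layers=16, n_heads=16, n_kv_heads=8,
                      vocab_size=6400, max_seq_len=512)
    print(config.model_type)  # "minimind"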
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "architectures": [
+     "Transformer"
+   ],
+   "auto_map": {
+     "AutoConfig": "LMConfig.LMConfig",
+     "AutoModelForCausalLM": "model.Transformer"
+   },
+   "aux_loss_alpha": 0.01,
+   "dim": 768,
+   "dropout": 0.0,
+   "flash_attn": true,
+   "hidden_dim": null,
+   "max_seq_len": 512,
+   "model_type": "minimind",
+   "multiple_of": 64,
+   "n_heads": 16,
+   "n_kv_heads": 8,
+   "n_layers": 16,
+   "n_routed_experts": 4,
+   "n_shared_experts": true,
+   "norm_eps": 1e-05,
+   "norm_topk_prob": true,
+   "num_experts_per_tok": 2,
+   "scoring_func": "softmax",
+   "seq_aux": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.37.2",
+   "use_moe": false,
+   "vocab_size": 6400
+ }
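
Because auto_map points AutoConfig at LMConfig.LMConfig and AutoModelForCausalLM at model.Transformer, the checkpoint can be loaded through the standard Auto classes. A sketch, assuming a local clone or Hub id of this repository (the path below is a placeholder):

    from transformers import AutoTokenizer, AutoModelForCausalLM

    repo = "path/to/this/repo"  # placeholder
    tokenizer = AutoTokenizer.from_pretrained(repo)
    # trust_remote_code lets transformers import LMConfig.py and model.py from the repository
    model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)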
generation_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "_from_model_config": true,
+   "transformers_version": "4.37.2"
+ }
model.py ADDED
@@ -0,0 +1,530 @@
+ import math
+ import struct
+ import inspect
+ from .LMConfig import LMConfig
+ from typing import Any, Optional, Tuple
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ from torch import nn
+ from transformers import PreTrainedModel
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+
+
+ class RMSNorm(torch.nn.Module):
+     def __init__(self, dim: int, eps: float):
+         super().__init__()
+         self.eps = eps
+         self.weight = nn.Parameter(torch.ones(dim))
+
+     def _norm(self, x):
+         return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
+
+     def forward(self, x):
+         output = self._norm(x.float()).type_as(x)
+         return output * self.weight
+
+
+ def precompute_pos_cis(dim: int, end: int, theta: float = 10000.0):
+     freqs = 1.0 / (theta ** (torch.arange(0, dim, 2)[: (dim // 2)].float() / dim))
+     t = torch.arange(end, device=freqs.device)  # type: ignore
+     freqs = torch.outer(t, freqs).float()  # type: ignore
+     pos_cis = torch.polar(torch.ones_like(freqs), freqs)  # complex64
+     return pos_cis
+
+
+ def apply_rotary_emb(xq, xk, pos_cis):
+     def unite_shape(pos_cis, x):
+         ndim = x.ndim
+         assert 0 <= 1 < ndim
+         assert pos_cis.shape == (x.shape[1], x.shape[-1])
+         shape = [d if i == 1 or i == ndim - 1 else 1 for i, d in enumerate(x.shape)]
+         return pos_cis.view(*shape)
+
+     xq_ = torch.view_as_complex(xq.float().reshape(*xq.shape[:-1], -1, 2))
+     xk_ = torch.view_as_complex(xk.float().reshape(*xk.shape[:-1], -1, 2))
+     pos_cis = unite_shape(pos_cis, xq_)
+     xq_out = torch.view_as_real(xq_ * pos_cis).flatten(3)
+     xk_out = torch.view_as_real(xk_ * pos_cis).flatten(3)
+     return xq_out.type_as(xq), xk_out.type_as(xk)
+
+
+ def repeat_kv(x: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """torch.repeat_interleave(x, dim=2, repeats=n_rep)"""
+     bs, slen, n_kv_heads, head_dim = x.shape
+     if n_rep == 1:
+         return x
+     return (
+         x[:, :, :, None, :]
+         .expand(bs, slen, n_kv_heads, n_rep, head_dim)
+         .reshape(bs, slen, n_kv_heads * n_rep, head_dim)
+     )
+
+
+ class Attention(nn.Module):
+     def __init__(self, args: LMConfig):
+         super().__init__()
+         self.n_kv_heads = args.n_heads if args.n_kv_heads is None else args.n_kv_heads
+         assert args.n_heads % self.n_kv_heads == 0
+         model_parallel_size = 1
+         self.n_local_heads = args.n_heads // model_parallel_size
+         self.n_local_kv_heads = self.n_kv_heads // model_parallel_size
+         self.n_rep = self.n_local_heads // self.n_local_kv_heads
+         self.head_dim = args.dim // args.n_heads
+         self.wq = nn.Linear(args.dim, args.n_heads * self.head_dim, bias=False)
+         self.wk = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
+         self.wv = nn.Linear(args.dim, self.n_kv_heads * self.head_dim, bias=False)
+         self.wo = nn.Linear(args.n_heads * self.head_dim, args.dim, bias=False)
+         self.attn_dropout = nn.Dropout(args.dropout)
+         self.resid_dropout = nn.Dropout(args.dropout)
+         self.dropout = args.dropout
+
+         # use flash attention or a manual implementation?
+         self.flash = hasattr(torch.nn.functional, 'scaled_dot_product_attention') and args.flash_attn
+
+         if not self.flash:
+             # print("WARNING: using slow attention. Flash Attention requires PyTorch >= 2.0")
+             mask = torch.full((1, 1, args.max_seq_len, args.max_seq_len), float("-inf"))
+             mask = torch.triu(mask, diagonal=1)
+             self.register_buffer("mask", mask)
+
+     def forward(
+             self,
+             x: torch.Tensor,
+             pos_cis: torch.Tensor,
+             use_kv_cache: bool = False,
+             past_kv: Tuple[torch.Tensor] = None
+     ):
+         bsz, seqlen, _ = x.shape
+         # QKV
+         # inference
+         if use_kv_cache:
+             # only compute Q for the last token; K/V of earlier tokens come from the cache
+             current_token = x[:, -1:, :]
+
+             if not past_kv:
+                 xq = self.wq(x)
+                 xk, xv = self.wk(x), self.wv(x)
+             else:
+                 past_key, past_value = past_kv
+                 xq = torch.cat((torch.zeros_like(x[:, :-1, :]), self.wq(current_token)), dim=1)
+                 xk = torch.cat((past_key, self.wk(current_token)), dim=1)
+                 xv = torch.cat((past_value, self.wv(current_token)), dim=1)
+
+             past_kv = (xk, xv)
+         else:
+             xq = self.wq(x)
+             xk, xv = self.wk(x), self.wv(x)
+
+         xq = xq.view(bsz, seqlen, self.n_local_heads, self.head_dim)
+         xk = xk.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
+         xv = xv.view(bsz, seqlen, self.n_local_kv_heads, self.head_dim)
+
+         # RoPE relative positional embeddings
+         xq, xk = apply_rotary_emb(xq, xk, pos_cis)
+
+         # grouped multiquery attention: expand out keys and values
+         xk = repeat_kv(xk, self.n_rep)  # (bs, seqlen, n_local_heads, head_dim)
+         xv = repeat_kv(xv, self.n_rep)  # (bs, seqlen, n_local_heads, head_dim)
+
+         # make heads into a batch dimension
+         xq = xq.transpose(1, 2)  # (bs, n_local_heads, seqlen, head_dim)
+         xk = xk.transpose(1, 2)
+         xv = xv.transpose(1, 2)
+
+         # flash implementation
+         if self.flash:
+             output = torch.nn.functional.scaled_dot_product_attention(xq, xk, xv, attn_mask=None,
+                                                                       dropout_p=self.dropout if self.training else 0.0,
+                                                                       is_causal=True)
+         else:
+             # manual implementation
+             scores = torch.matmul(xq, xk.transpose(2, 3)) / math.sqrt(self.head_dim)
+             assert hasattr(self, 'mask')
+             scores = scores + self.mask[:, :, :seqlen, :seqlen]  # (bs, n_local_heads, seqlen, cache_len + seqlen)
+             scores = F.softmax(scores.float(), dim=-1).type_as(xq)
+             scores = self.attn_dropout(scores)
+             output = torch.matmul(scores, xv)  # (bs, n_local_heads, seqlen, head_dim)
+
+         # restore time as batch dimension and concat heads
+         output = output.transpose(1, 2).contiguous().view(bsz, seqlen, -1)
+
+         # final projection into the residual stream
+         output = self.wo(output)
+         output = self.resid_dropout(output)
+         return output, past_kv
+
+
+ class FeedForward(nn.Module):
+     def __init__(self, dim: int, hidden_dim: int, multiple_of: int, dropout: float):
+         super().__init__()
+         if hidden_dim is None:
+             hidden_dim = 4 * dim
+             hidden_dim = int(2 * hidden_dim / 3)
+             hidden_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of)
+         self.w1 = nn.Linear(dim, hidden_dim, bias=False)
+         self.w2 = nn.Linear(hidden_dim, dim, bias=False)
+         self.w3 = nn.Linear(dim, hidden_dim, bias=False)
+         self.dropout = nn.Dropout(dropout)
+
+     def forward(self, x):
+         return self.dropout(self.w2(F.silu(self.w1(x)) * self.w3(x)))
+
+
+ class MoEGate(nn.Module):
+     def __init__(self, config: LMConfig):
+         super().__init__()
+         self.config = config
+         self.top_k = config.num_experts_per_tok
+         self.n_routed_experts = config.n_routed_experts
+
+         self.scoring_func = config.scoring_func
+         self.alpha = config.aux_loss_alpha
+         self.seq_aux = config.seq_aux
+
+         # topk selection algorithm
+         self.norm_topk_prob = config.norm_topk_prob
+         self.gating_dim = config.dim
+         self.weight = nn.Parameter(torch.empty((self.n_routed_experts, self.gating_dim)))
+         self.reset_parameters()
+
+     def reset_parameters(self) -> None:
+         import torch.nn.init as init
+         init.kaiming_uniform_(self.weight, a=math.sqrt(5))
+
+     def forward(self, hidden_states):
+         bsz, seq_len, h = hidden_states.shape
+         ### compute gating score
+         hidden_states = hidden_states.view(-1, h)
+         logits = F.linear(hidden_states, self.weight, None)
+         if self.scoring_func == 'softmax':
+             scores = logits.softmax(dim=-1)
+         else:
+             raise NotImplementedError(f'unsupported scoring function for MoE gating: {self.scoring_func}')
+
+         ### select top-k experts
+         topk_weight, topk_idx = torch.topk(scores, k=self.top_k, dim=-1, sorted=False)
+
+         ### norm gate to sum 1
+         if self.top_k > 1 and self.norm_topk_prob:
+             denominator = topk_weight.sum(dim=-1, keepdim=True) + 1e-20
+             topk_weight = topk_weight / denominator
+
+         ### expert-level computation auxiliary loss
+         if self.training and self.alpha > 0.0:
+             scores_for_aux = scores
+             aux_topk = self.top_k
+             # always compute aux loss based on the naive greedy topk method
+             topk_idx_for_aux_loss = topk_idx.view(bsz, -1)
+             if self.seq_aux:
+                 scores_for_seq_aux = scores_for_aux.view(bsz, seq_len, -1)
+                 ce = torch.zeros(bsz, self.n_routed_experts, device=hidden_states.device)
+                 ce.scatter_add_(1, topk_idx_for_aux_loss,
+                                 torch.ones(bsz, seq_len * aux_topk, device=hidden_states.device)).div_(
+                     seq_len * aux_topk / self.n_routed_experts)
+                 aux_loss = (ce * scores_for_seq_aux.mean(dim=1)).sum(dim=1).mean() * self.alpha
+             else:
+                 mask_ce = F.one_hot(topk_idx_for_aux_loss.view(-1), num_classes=self.n_routed_experts)
+                 ce = mask_ce.float().mean(0)
+                 Pi = scores_for_aux.mean(0)
+                 fi = ce * self.n_routed_experts
+                 aux_loss = (Pi * fi).sum() * self.alpha
+         else:
+             aux_loss = None
+         return topk_idx, topk_weight, aux_loss
+
+
+ class MOEFeedForward(nn.Module):
+     def __init__(self, config: LMConfig):
+         super().__init__()
+         self.config = config
+         self.experts = nn.ModuleList([
+             FeedForward(
+                 dim=config.dim,
+                 hidden_dim=config.hidden_dim,
+                 multiple_of=config.multiple_of,
+                 dropout=config.dropout,
+             )
+             for _ in range(config.n_routed_experts)
+         ])
+
+         self.gate = MoEGate(config)
+         if config.n_shared_experts is not None:
+             self.shared_experts = FeedForward(
+                 dim=config.dim,
+                 hidden_dim=config.hidden_dim,
+                 multiple_of=config.multiple_of,
+                 dropout=config.dropout,
+             )
+
+     def forward(self, x):
+         identity = x
+         orig_shape = x.shape
+         bsz, seq_len, _ = x.shape
+
+         # select experts for each token via the gating mechanism
+         topk_idx, topk_weight, aux_loss = self.gate(x)
+
+         x = x.view(-1, x.shape[-1])
+         flat_topk_idx = topk_idx.view(-1)
+
+         if self.training:
+             # in training mode, repeat each token once per selected expert
+             x = x.repeat_interleave(self.config.num_experts_per_tok, dim=0)
+             y = torch.empty_like(x, dtype=torch.float16)
+             for i, expert in enumerate(self.experts):
+                 y[flat_topk_idx == i] = expert(x[flat_topk_idx == i])
+             y = (y.view(*topk_weight.shape, -1) * topk_weight.unsqueeze(-1)).sum(dim=1)
+             y = y.view(*orig_shape)
+         else:
+             # in inference mode, route each token only to its selected experts
+             y = self.moe_infer(x, flat_topk_idx, topk_weight.view(-1, 1)).view(*orig_shape)
+
+         if self.config.n_shared_experts is not None:
+             y = y + self.shared_experts(identity)
+
+         return y
+
+     @torch.no_grad()
+     def moe_infer(self, x, flat_expert_indices, flat_expert_weights):
+         expert_cache = torch.zeros_like(x)
+         idxs = flat_expert_indices.argsort()
+         tokens_per_expert = flat_expert_indices.bincount().cpu().numpy().cumsum(0)
+         token_idxs = idxs // self.config.num_experts_per_tok
+         # e.g. if tokens_per_expert = [6, 15, 20, 26, 33, 38, 46, 52] and
+         # token_idxs = [3, 7, 19, 21, 24, 25, 4, 5, 6, 10, 11, 12, ...], then the tokens at
+         # token_idxs[:6] -> [3, 7, 19, 21, 24, 25] go to expert 0, those at token_idxs[6:15] to expert 1, and so on
+         for i, end_idx in enumerate(tokens_per_expert):
+             start_idx = 0 if i == 0 else tokens_per_expert[i - 1]
+             if start_idx == end_idx:
+                 continue
+             expert = self.experts[i]
+             exp_token_idx = token_idxs[start_idx:end_idx]
+             expert_tokens = x[exp_token_idx]
+             expert_out = expert(expert_tokens)
+             expert_out.mul_(flat_expert_weights[idxs[start_idx:end_idx]])
+             # accumulate the weighted expert outputs with scatter_add_
+             expert_cache.scatter_add_(0, exp_token_idx.view(-1, 1).repeat(1, x.shape[-1]), expert_out)
+
+         return expert_cache
+
+
+ class TransformerBlock(nn.Module):
+     def __init__(self, layer_id: int, args: LMConfig):
+         super().__init__()
+         self.n_heads = args.n_heads
+         self.dim = args.dim
+         self.head_dim = args.dim // args.n_heads
+         self.attention = Attention(args)
+
+         self.layer_id = layer_id
+         self.attention_norm = RMSNorm(args.dim, eps=args.norm_eps)
+         self.ffn_norm = RMSNorm(args.dim, eps=args.norm_eps)
+
+         if args.use_moe:
+             self.feed_forward = MOEFeedForward(args)
+         else:
+             self.feed_forward = FeedForward(
+                 dim=args.dim,
+                 hidden_dim=args.hidden_dim,
+                 multiple_of=args.multiple_of,
+                 dropout=args.dropout,
+             )
+
+     def forward(self, x, pos_cis, use_kv_cache=False, past_kv: Tuple[torch.Tensor] = None):
+         attn_res, past_kv = self.attention(self.attention_norm(x), pos_cis, use_kv_cache, past_kv)
+         h = x + attn_res
+         out = h + self.feed_forward(self.ffn_norm(h))
+         return out, past_kv
+
+
+ class Transformer(PreTrainedModel):
+     config_class = LMConfig
+     last_loss: Optional[torch.Tensor]
+
+     def __init__(self, params: LMConfig = None):
+         super().__init__(params)
+         if not params:
+             params = LMConfig()
+         self.params = params
+         self.vocab_size = params.vocab_size
+         self.n_layers = params.n_layers
+
+         self.tok_embeddings = nn.Embedding(params.vocab_size, params.dim)
+         self.dropout = nn.Dropout(params.dropout)
+         self.layers = torch.nn.ModuleList()
+         for layer_id in range(self.n_layers):
+             self.layers.append(TransformerBlock(layer_id, params))
+         self.norm = RMSNorm(params.dim, eps=params.norm_eps)
+         self.output = nn.Linear(params.dim, params.vocab_size, bias=False)
+
+         # share the unembedding parameters with the embedding parameters
+         self.tok_embeddings.weight = self.output.weight  # https://paperswithcode.com/method/weight-tying
+
+         # some useful precompute for the RoPE relative positional embeddings
+         pos_cis = precompute_pos_cis(self.params.dim // self.params.n_heads, self.params.max_seq_len)
+         self.register_buffer("pos_cis", pos_cis, persistent=False)
+
+         # init all weights
+         self.apply(self._init_weights)
+         # apply special scaled init to the residual projections, per GPT-2 paper
+         for pn, p in self.named_parameters():
+             if pn.endswith('w3.weight') or pn.endswith('wo.weight'):
+                 torch.nn.init.normal_(p, mean=0.0, std=0.02 / math.sqrt(2 * params.n_layers))
+
+         # Initialize attribute for the loss of the last forward call. This will be set if the forward is called with a targets tensor.
+         self.last_loss = None
+         self.OUT = CausalLMOutputWithPast()
+
+     def _init_weights(self, module):
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+
+     def forward(self, tokens: Optional[torch.Tensor] = None,
+                 targets: Optional[torch.Tensor] = None,
+                 use_kv_cache=False, past_kvs=None, **keyargs):
+         if past_kvs is None:
+             past_kvs = [None for _ in range(self.n_layers)]
+         if 'input_ids' in keyargs:
+             tokens = keyargs['input_ids']
+         if 'attention_mask' in keyargs:
+             targets = keyargs['attention_mask']
+
+         _bsz, seqlen = tokens.shape
+         h = self.tok_embeddings(tokens)
+         h = self.dropout(h)
+         pos_cis = self.pos_cis[:seqlen]
+         for idx, layer in enumerate(self.layers):
+             h, past_kvs[idx] = layer(h, pos_cis, use_kv_cache, past_kvs[idx])
+
+         h = self.norm(h)
+
+         if targets is not None:
+             # if we are given some desired targets also calculate the loss
+             logits = self.output(h)
+             self.last_loss = F.cross_entropy(logits.view(-1, logits.size(-1)), targets.view(-1), ignore_index=-1)
+         else:
+             # inference-time mini-optimization: only forward the output on the very last position
+             logits = self.output(h[:, [-1], :])  # note: using list [-1] to preserve the time dim
+             self.last_loss = None
+
+         self.OUT.__setitem__('logits', logits)
+         self.OUT.__setitem__('last_loss', self.last_loss)
+
+         if use_kv_cache:
+             return self.OUT, past_kvs
+         return self.OUT
+
+
+     @torch.inference_mode()
+     def generate(self, idx, eos, max_new_tokens, temperature=0.7, top_k=None, stream=True, repetition_penalty=1.):
+         index = idx.shape[1]
+         use_kv_cache = True
+         past_kvs = [None for _ in range(self.n_layers)]
+         while idx.shape[1] < max_new_tokens - 1:
+             # if the sequence context is growing too long we must crop it at block_size
+             idx_cond = idx  # if idx.size(1) <= self.params.max_seq_len else idx[:, -self.params.max_seq_len:]
+             # forward the model to get the logits for the index in the sequence
+             inference_res = self(idx_cond, use_kv_cache=use_kv_cache, past_kvs=past_kvs)
+             if use_kv_cache:
+                 logits, past_kvs = inference_res[0].logits, inference_res[1]
+             else:
+                 logits = inference_res.logits
+
+             logits = logits[:, -1, :]  # crop to just the final time step
+
+             # Apply repetition penalty
+             for token in set(idx.tolist()[0]):
+                 logits[:, token] /= repetition_penalty
+
+             if temperature == 0.0:
+                 # "sample" the single most likely index
+                 __, idx_next = torch.topk(logits, k=1, dim=-1)
+             else:
+                 # pluck the logits at the final step and scale by desired temperature
+                 logits = logits / temperature
+                 # optionally crop the logits to only the top k options
+                 if top_k is not None:
+                     v, __ = torch.topk(logits, min(top_k, logits.size(-1)))
+                     logits[logits < v[:, [-1]]] = -float('Inf')
+
+                 # apply softmax to convert logits to (normalized) probabilities
+                 probs = F.softmax(logits, dim=-1)
+                 idx_next = torch.multinomial(probs, num_samples=1, generator=None)
+             # append sampled index to the running sequence and continue
+             if idx_next == eos:
+                 break
+
+             idx = torch.cat((idx, idx_next), dim=1)
+             if stream:
+                 yield idx[:, index:]
+
+         if not stream:
+             yield idx[:, index:]
+
+     @torch.inference_mode()
+     def eval_answer(self, idx):
+         # if the sequence context is growing too long we must crop it at block_size
+         idx_cond = idx if idx.size(1) <= self.params.max_seq_len else idx[:, -self.params.max_seq_len:]
+         # forward the model to get the logits for the index in the sequence
+         past_kvs = [None for _ in range(self.n_layers)]
+         inference_res = self(idx_cond, use_kv_cache=False, past_kvs=past_kvs)
+         logits = inference_res.logits
+         logits = logits[:, -1, :]
+         return logits
+
+     def export(self, filepath='model.bin'):
+         """export the model weights in fp32 into .bin file to be read from C"""
+         f = open(filepath, 'wb')
+
+         def serialize(t):
+             d = t.detach().cpu().view(-1).numpy().astype(np.float32)
+             b = struct.pack(f'{len(d)}f', *d)
+             f.write(b)
+
+         # first write out the header
+         hidden_dim = self.layers[0].feed_forward.w1.weight.shape[0]
+         p = self.params
+         n_kv_heads = p.n_heads if p.n_kv_heads is None else p.n_kv_heads
+         header = struct.pack('iiiiiii', p.dim, hidden_dim, p.n_layers, p.n_heads,
+                              n_kv_heads, p.vocab_size, p.max_seq_len)
+         f.write(header)
+
+         # next write out the embedding weights
+         serialize(self.tok_embeddings.weight)
+
+         # now all the layers
+         # attention weights
+         for layer in self.layers:
+             serialize(layer.attention_norm.weight)
+         for layer in self.layers:
+             serialize(layer.attention.wq.weight)
+         for layer in self.layers:
+             serialize(layer.attention.wk.weight)
+         for layer in self.layers:
+             serialize(layer.attention.wv.weight)
+         for layer in self.layers:
+             serialize(layer.attention.wo.weight)
+         # ffn weights
+         for layer in self.layers:
+             serialize(layer.ffn_norm.weight)
+         for layer in self.layers:
+             serialize(layer.feed_forward.w1.weight)
+         for layer in self.layers:
+             serialize(layer.feed_forward.w2.weight)
+         for layer in self.layers:
+             serialize(layer.feed_forward.w3.weight)
+         # final rmsnorm
+         serialize(self.norm.weight)
+         # note: no need to write final classifier weights due to weight sharing
+         # pos_cis is complex-valued: write its real (cos) and imaginary (sin) parts
+         serialize(torch.view_as_real(self.pos_cis[:p.max_seq_len])[..., 0].contiguous())
+         serialize(torch.view_as_real(self.pos_cis[:p.max_seq_len])[..., 1].contiguous())
+
+         # write to binary file
+         f.close()
+         print(f"wrote {filepath}")
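
For orientation, a minimal sketch of driving the streaming generate() defined above; the repository path and the prompt are placeholders, and the eos id is taken from the tokenizer (which maps </s> to id 2, see tokenizer_config.json below):

    import torch
    from transformers import AutoTokenizer, AutoModelForCausalLM

    repo = "path/to/this/repo"  # placeholder
    tokenizer = AutoTokenizer.from_pretrained(repo)
    model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True).eval()

    prompt = "<s>user\nHello</s>\n<s>assistant\n"  # same layout the chat_template produces
    idx = torch.tensor(tokenizer(prompt)["input_ids"]).unsqueeze(0)
    # generate() is a generator; with stream=True it yields all tokens produced so far
    for chunk in model.generate(idx, eos=tokenizer.eos_token_id, max_new_tokens=100,
                                temperature=0.7, top_k=16, stream=True):
        print(tokenizer.decode(chunk[0].tolist()))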
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39a113ff16b99a47a96737ff1b9957d6adc3e851aeb33c9115c52dcd11906c07
+ size 435044370
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dadfd56d766715c61d2ef780a525ab43b8e6da4de6865bda3d95fdef5e134055
+ size 493443
tokenizer_config.json ADDED
@@ -0,0 +1,44 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "add_prefix_space": true,
+   "added_tokens_decoder": {
+     "0": {
+       "content": "<unk>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "<s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [],
+   "bos_token": "<s>",
+   "chat_template": "{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ system_message }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<s>user\\n' + content + '</s>\\n<s>assistant\\n' }}{% elif message['role'] == 'assistant' %}{{ content + '</s>' + '\\n' }}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "legacy": true,
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "spaces_between_special_tokens": false,
+   "tokenizer_class": "PreTrainedTokenizerFast",
+   "unk_token": "<unk>",
+   "use_default_system_prompt": false
+ }
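
The chat_template above wraps each user turn as <s>user\n...</s> and opens an assistant turn right after it. A small sketch of rendering it with apply_chat_template (the standard transformers API; the path is a placeholder):

    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder
    messages = [{"role": "user", "content": "Who are you?"}]
    text = tokenizer.apply_chat_template(messages, tokenize=False)
    print(text)
    # <s>user
    # Who are you?</s>
    # <s>assistant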