jeiku committed on
Commit
2a2662f
1 Parent(s): b9ab93f

Delete modeling_stablelm_epoch.py

Files changed (1)
  1. modeling_stablelm_epoch.py +0 -916
modeling_stablelm_epoch.py DELETED
@@ -1,916 +0,0 @@
- # coding=utf-8
- # Copyright 2023 Stability AI, EleutherAI, and The HuggingFace Inc. team. All rights reserved.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- #
- # This code is based off the following work:
- # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
- # https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py
- """ PyTorch StableLM Epoch model. """
- from typing import Optional, Tuple, Union
- import math
- import warnings
-
- import torch
- import torch.nn.functional as F
- import torch.utils.checkpoint
- from torch import nn
- from torch.nn import CrossEntropyLoss
-
- from transformers.cache_utils import Cache
- from transformers.modeling_outputs import (
-     BaseModelOutputWithPast,
-     CausalLMOutputWithPast,
- )
- from transformers.modeling_utils import PreTrainedModel
- from transformers.utils import logging, is_flash_attn_greater_or_equal_2_10
-
- from .configuration_stablelm_epoch import StableLMEpochConfig
-
- try:
-     from flash_attn import flash_attn_func, flash_attn_varlen_func
-     from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
- except ImportError:
-     flash_attn_func, flash_attn_varlen_func = None, None
-     index_first_axis, pad_input, unpad_input = None, None, None
-
-
- logger = logging.get_logger(__name__)
-
-
- # Copied from transformers.models.llama.modeling_llama._get_unpad_data
- def _get_unpad_data(attention_mask):
-     seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
-     indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
-     max_seqlen_in_batch = seqlens_in_batch.max().item()
-     cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
-     return (
-         indices,
-         cu_seqlens,
-         max_seqlen_in_batch,
-     )
-
-
- # Copied from transformers.models.bart.modeling_bart._make_causal_mask
- def _make_causal_mask(
-     input_ids_shape: torch.Size,
-     dtype: torch.dtype,
-     device: torch.device,
-     past_key_values_length: int = 0,
- ):
-     """Make causal mask used for bi-directional self-attention."""
-     batch_size, tgt_len = input_ids_shape
-     mask = torch.full((tgt_len, tgt_len), torch.finfo(torch.float16).min, device=device)
-     mask_cond = torch.arange(mask.size(-1), device=device)
-     mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
-     mask = mask.to(dtype)
-     if past_key_values_length > 0:
-         mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
-     return mask[None, None, :, :].expand(batch_size, 1, tgt_len, tgt_len + past_key_values_length)
-
-
- # Copied from transformers.models.bart.modeling_bart._expand_mask
- def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
-     """Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, tgt_seq_len, src_seq_len]`."""
-     batch_size, src_len = mask.size()
-     tgt_len = tgt_len if tgt_len is not None else src_len
-
-     expanded_mask = mask[:, None, None, :].expand(batch_size, 1, tgt_len, src_len).to(dtype)
-     inverted_mask = 1.0 - expanded_mask
-
-     return inverted_mask.masked_fill(
-         inverted_mask.to(torch.bool), torch.finfo(dtype).min
-     )
-
-
- class RotaryEmbedding(nn.Module):
-     def __init__(
-         self,
-         dim: int,
-         max_position_embeddings: int,
-         base: int = 10_000,
-         device: Optional[torch.device] = None,
-     ):
-         super().__init__()
-
-         self.dim = dim
-         self.max_position_embeddings = max_position_embeddings
-         self.base = base
-         inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim))
-         self.register_buffer("inv_freq", inv_freq, persistent=False)
-
-         # Build here to make `torch.jit.trace` work.
-         self._set_cos_sin_cache(
-             seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype(),
-         )
-
-     def _set_cos_sin_cache(self, seq_len: int, device: torch.device, dtype: torch.dtype):
-         self.max_seq_len_cached = seq_len
-         t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32)
-
-         # Don't do einsum, it converts fp32 to fp16 under AMP
-         # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
-         freqs = torch.outer(t, self.inv_freq)
-         # Different from paper, but it uses a different permutation in order to obtain the same calculation
-         emb = torch.cat((freqs, freqs), dim=-1)
-         self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
-         self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
-
-     def forward(self, x: torch.Tensor, seq_len: Optional[int] = None):
-         # x: [batch_size, num_heads, seq_len, head_size]
-         if seq_len > self.max_seq_len_cached:
-             self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.get_default_dtype())
-         return (
-             self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
-             self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
-         )
-
-
- def rotate_half(x: torch.Tensor):
-     """Rotates half the hidden dims of the input."""
-     x1, x2 = torch.chunk(x, 2, dim=-1)
-     return torch.cat((-x2, x1), dim=-1)
-
-
- def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
-     # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
-     cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
-     sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
-     cos = cos[position_ids].unsqueeze(1)  # [batch_size, 1, seq_len, dim]
-     sin = sin[position_ids].unsqueeze(1)  # [batch_size, 1, seq_len, dim]
-     q_embed = (q * cos) + (rotate_half(q) * sin)
-     k_embed = (k * cos) + (rotate_half(k) * sin)
-     return q_embed, k_embed
-
-
- class MLP(nn.Module):
-     def __init__(self, config: StableLMEpochConfig):
-         super().__init__()
-         self.config = config
-         self.hidden_size = config.hidden_size
-         self.intermediate_size = config.intermediate_size
-         self.gate_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
-         self.up_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
-         self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
-         self.act_fn = nn.SiLU()
-
-     def forward(self, x: torch.Tensor) -> torch.Tensor:
-         return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
-
-
- def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
-     """
-     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
-     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
-     """
-     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
-     if n_rep == 1:
-         return hidden_states
-     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
-     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
-
-
- class Attention(nn.Module):
-     def __init__(self, config: StableLMEpochConfig):
-         super().__init__()
-         self.config = config
-         self.hidden_size = config.hidden_size
-         self.num_heads = config.num_attention_heads
-         self.head_dim = self.hidden_size // self.num_heads
-         self.num_key_value_heads = config.num_key_value_heads
-         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
-         self.max_position_embeddings = config.max_position_embeddings
-         self.is_causal = True
-
-         if (self.head_dim * self.num_heads) != self.hidden_size:
-             raise ValueError(
-                 f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
-                 f" and `num_heads`: {self.num_heads})."
-             )
-         self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
-         self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
-         self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
-         self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
-
-         self._init_rope()
-
-     def _init_rope(self):
-         self.rotary_ndims = int(self.head_dim * self.config.rope_pct)
-         self.rotary_emb = RotaryEmbedding(
-             self.rotary_ndims,
-             max_position_embeddings=self.config.max_position_embeddings,
-             base=self.config.rope_theta,
-         )
-
-     def forward(
-         self,
-         hidden_states: torch.FloatTensor,
-         attention_mask: torch.FloatTensor,
-         position_ids: torch.LongTensor,
-         past_key_value: Optional[Tuple[torch.Tensor]] = None,
-         output_attentions: Optional[bool] = False,
-         use_cache: Optional[bool] = False,
-     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-         bsz, q_len, _ = hidden_states.size()
-
-         query_states = self.q_proj(hidden_states)
-         key_states = self.k_proj(hidden_states)
-         value_states = self.v_proj(hidden_states)
-
-         query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-         key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
-         value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
-
-         query_rot = query_states[..., : self.rotary_ndims]
-         query_pass = query_states[..., self.rotary_ndims :]
-         key_rot = key_states[..., : self.rotary_ndims]
-         key_pass = key_states[..., self.rotary_ndims :]
-
-         kv_seq_len = key_states.shape[-2]
-         if past_key_value is not None:
-             kv_seq_len += past_key_value[0].shape[-2]
-         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
-         query_states, key_states = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
-
-         # [batch_size, num_heads, seq_len, head_dim]
-         query_states = torch.cat((query_states, query_pass), dim=-1)
-         key_states = torch.cat((key_states, key_pass), dim=-1)
-
-         if past_key_value is not None:
-             # Reuse k, v, self_attention
-             key_states = torch.cat((past_key_value[0], key_states), dim=2)
-             value_states = torch.cat((past_key_value[1], value_states), dim=2)
-
-         past_key_value = (key_states, value_states) if use_cache else None
-
-         # Repeat k/v heads if n_kv_heads < n_heads
-         key_states = repeat_kv(key_states, self.num_key_value_groups)
-         value_states = repeat_kv(value_states, self.num_key_value_groups)
-
-         attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
-
-         if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
-             raise ValueError(
-                 f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
-                 f" {attn_weights.size()}"
-             )
-
-         if attention_mask is not None:
-             if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
-                 raise ValueError(
-                     f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
-                 )
-             attn_weights = attn_weights + attention_mask
-
-         # Upcast attention to fp32
-         attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
-         attn_output = torch.matmul(attn_weights, value_states)
-
-         if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
-             raise ValueError(
-                 f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
-                 f" {attn_output.size()}"
-             )
-
-         # Merge heads
-         attn_output = attn_output.transpose(1, 2).contiguous()
-         attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
-
-         # Final linear projection
-         attn_output = self.o_proj(attn_output)
-
-         if not output_attentions:
-             attn_weights = None
-
-         return attn_output, attn_weights, past_key_value
-
-
- class FlashAttention2(Attention):
-     """
-     Reference: https://github.com/huggingface/transformers/blob/5d36025ca13d05151b7a0c761e90d429c4644a30/src/transformers/models/llama/modeling_llama.py#L456
-     """
-
-     def __init__(self, *args, **kwargs):
-         super().__init__(*args, **kwargs)
-
-         # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
-         # flash_attn<2.1 generates top-left aligned causal mask, while what is needed here is bottom-right alignment, that was made default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
-         # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
-         self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
-
-     def forward(
-         self,
-         hidden_states: torch.Tensor,
-         attention_mask: Optional[torch.LongTensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_value: Optional[Cache] = None,
-         output_attentions: bool = False,
-         use_cache: bool = False,
-         **kwargs,
-     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
-         # FlashAttention2 attention does not support output_attentions
-         if "padding_mask" in kwargs:
-             warnings.warn(
-                 "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure to use `attention_mask` instead."
-             )
-
-             # overwrite attention_mask with padding_mask
-             attention_mask = kwargs.pop("padding_mask")
-
-         output_attentions = False
-
-         bsz, q_len, _ = hidden_states.size()
-
-         query_states = self.q_proj(hidden_states)
-         key_states = self.k_proj(hidden_states)
-         value_states = self.v_proj(hidden_states)
-
-         # Flash attention requires the input to have the shape
-         # batch_size x seq_length x head_dim x hidden_dim
-         # therefore we just need to keep the original shape
-         query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
-         key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
-         value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
-
-         query_rot = query_states[..., : self.rotary_ndims]
-         query_pass = query_states[..., self.rotary_ndims :]
-         key_rot = key_states[..., : self.rotary_ndims]
-         key_pass = key_states[..., self.rotary_ndims :]
-
-         kv_seq_len = key_states.shape[-2]
-         if past_key_value is not None:
-             kv_seq_len += past_key_value[0].shape[-2]
-         cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
-         query_states, key_states = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
-
-         # [batch_size, num_heads, seq_len, head_dim]
-         query_states = torch.cat((query_states, query_pass), dim=-1)
-         key_states = torch.cat((key_states, key_pass), dim=-1)
-
-         if past_key_value is not None:
-             # Reuse k, v, self_attention
-             key_states = torch.cat((past_key_value[0], key_states), dim=2)
-             value_states = torch.cat((past_key_value[1], value_states), dim=2)
-
-         past_key_value = (key_states, value_states) if use_cache else None
-
-         # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
-         # to be able to avoid many of these transpose/reshape/view.
-         query_states = query_states.transpose(1, 2)
-         key_states = key_states.transpose(1, 2)
-         value_states = value_states.transpose(1, 2)
-
-         # `attention_dropout` is not set in `Attention.__init__`; fall back to 0.0 if it is absent.
-         dropout_rate = getattr(self, "attention_dropout", 0.0) if self.training else 0.0
-
-         attn_output = self._flash_attention_forward(
-             query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
-         )
-         attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
-         attn_output = self.o_proj(attn_output)
-
-         if not output_attentions:
-             attn_weights = None
-
-         return attn_output, attn_weights, past_key_value
-
-     def _flash_attention_forward(
-         self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
-     ):
-         """
-         Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token,
-         first unpad the input, then compute the attention scores and pad the final attention scores.
-
-         Args:
-             query_states (`torch.Tensor`):
-                 Input query states to be passed to Flash Attention API
-             key_states (`torch.Tensor`):
-                 Input key states to be passed to Flash Attention API
-             value_states (`torch.Tensor`):
-                 Input value states to be passed to Flash Attention API
-             attention_mask (`torch.Tensor`):
-                 The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
-                 position of padding tokens and 1 for the position of non-padding tokens.
-             dropout (`float`, *optional*):
-                 Attention dropout
-             softmax_scale (`float`, *optional*):
-                 The scaling of QK^T before applying softmax. Defaults to 1 / sqrt(head_dim).
-         """
-         if not self._flash_attn_uses_top_left_mask:
-             causal = self.is_causal
-         else:
-             # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in FlashAttention2 __init__.
-             causal = self.is_causal and query_length != 1
-
-         # Contains at least one padding token in the sequence
-         if attention_mask is not None:
-             batch_size = query_states.shape[0]
-             query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
-                 query_states, key_states, value_states, attention_mask, query_length
-             )
-
-             cu_seqlens_q, cu_seqlens_k = cu_seq_lens
-             max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
-
-             attn_output_unpad = flash_attn_varlen_func(
-                 query_states,
-                 key_states,
-                 value_states,
-                 cu_seqlens_q=cu_seqlens_q,
-                 cu_seqlens_k=cu_seqlens_k,
-                 max_seqlen_q=max_seqlen_in_batch_q,
-                 max_seqlen_k=max_seqlen_in_batch_k,
-                 dropout_p=dropout,
-                 softmax_scale=softmax_scale,
-                 causal=causal,
-             )
-
-             attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
-         else:
-             attn_output = flash_attn_func(
-                 query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
-             )
-
-         return attn_output
-
-     def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
-         indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
-         batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
-
-         key_layer = index_first_axis(
-             key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
-         )
-         value_layer = index_first_axis(
-             value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
-         )
-         if query_length == kv_seq_len:
-             query_layer = index_first_axis(
-                 query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
-             )
-             cu_seqlens_q = cu_seqlens_k
-             max_seqlen_in_batch_q = max_seqlen_in_batch_k
-             indices_q = indices_k
-         elif query_length == 1:
-             max_seqlen_in_batch_q = 1
-             cu_seqlens_q = torch.arange(
-                 batch_size + 1, dtype=torch.int32, device=query_layer.device
-             )  # There is a memcpy here, that is very bad.
-             indices_q = cu_seqlens_q[:-1]
-             query_layer = query_layer.squeeze(1)
-         else:
-             # The -q_len: slice assumes left padding.
-             attention_mask = attention_mask[:, -query_length:]
-             query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
-
-         return (
-             query_layer,
-             key_layer,
-             value_layer,
-             indices_q,
-             (cu_seqlens_q, cu_seqlens_k),
-             (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
-         )
-
-
- ATTENTION_CLASSES = {
-     "eager": Attention,
-     "flash_attention_2": FlashAttention2,
- }
-
-
- class DecoderLayer(nn.Module):
-     def __init__(self, config: StableLMEpochConfig):
-         super().__init__()
-         self.self_attn = ATTENTION_CLASSES[config._attn_implementation](config=config)
-         self.mlp = MLP(config)
-         self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
-         self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
-
-     def forward(
-         self,
-         hidden_states: Optional[torch.FloatTensor],
-         attention_mask: Optional[torch.FloatTensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_value: Optional[Tuple[torch.Tensor]] = None,
-         output_attentions: Optional[bool] = False,
-         use_cache: Optional[bool] = False,
-     ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
-         residual = hidden_states
-
-         hidden_states = self.input_layernorm(hidden_states)
-
-         # Self Attention
-         hidden_states, self_attn_weights, present_key_value = self.self_attn(
-             hidden_states=hidden_states,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_value=past_key_value,
-             output_attentions=output_attentions,
-             use_cache=use_cache,
-         )
-         hidden_states = residual + hidden_states
-
-         # Fully Connected
-         residual = hidden_states
-         hidden_states = self.post_attention_layernorm(hidden_states)
-         hidden_states = self.mlp(hidden_states)
-         hidden_states = residual + hidden_states
-
-         outputs = (hidden_states,)
-
-         if output_attentions:
-             outputs += (self_attn_weights,)
-
-         if use_cache:
-             outputs += (present_key_value,)
-
-         return outputs
-
-
- class StableLMEpochPreTrainedModel(PreTrainedModel):
-     """An abstract class to handle weights initialization and a simple interface
-     for downloading and loading pretrained models.
-     """
-
-     config_class = StableLMEpochConfig
-     base_model_prefix = "transformer"
-     supports_gradient_checkpointing = True
-     _no_split_modules = ["DecoderLayer"]
-     _skip_keys_device_placement = "past_key_values"
-     _supports_flash_attn_2 = True
-
-     def _init_weights(self, module: nn.Module):
-         """Initialize the weights"""
-         if isinstance(module, nn.Linear):
-             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
-             if module.bias is not None:
-                 module.bias.data.zero_()
-         elif isinstance(module, nn.Embedding):
-             module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
-             if module.padding_idx is not None:
-                 module.weight.data[module.padding_idx].zero_()
-         elif isinstance(module, nn.LayerNorm):
-             module.bias.data.zero_()
-             module.weight.data.fill_(1.0)
-
-     def _set_gradient_checkpointing(self, module: nn.Module, value=False):
-         if isinstance(module, StableLMEpochModel):
-             module.gradient_checkpointing = value
-
-
- class StableLMEpochModel(StableLMEpochPreTrainedModel):
-     def __init__(self, config: StableLMEpochConfig):
-         super().__init__(config)
-         self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
-         self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.num_hidden_layers)])
-         self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
-
-         self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
-         self.gradient_checkpointing = False
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.embed_tokens
-
-     def set_input_embeddings(self, value: nn.Module):
-         self.embed_tokens = value
-
-     # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
-     def _prepare_decoder_attention_mask(
-         self,
-         attention_mask: torch.Tensor,
-         input_shape: torch.Size,
-         inputs_embeds: torch.Tensor,
-         past_key_values_length: int,
-     ):
-         # Create causal mask
-         # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
-         combined_attention_mask = None
-         if input_shape[-1] > 1:
-             combined_attention_mask = _make_causal_mask(
-                 input_shape,
-                 inputs_embeds.dtype,
-                 device=inputs_embeds.device,
-                 past_key_values_length=past_key_values_length,
-             )
-
-         if attention_mask is not None:
-             # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
-             expanded_attn_mask = _expand_mask(
-                 attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
-             ).to(inputs_embeds.device)
-             combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
-
-         return combined_attention_mask
-
-     def forward(
-         self,
-         input_ids: Optional[torch.LongTensor] = None,
-         attention_mask: Optional[torch.FloatTensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-     ) -> Union[Tuple, BaseModelOutputWithPast]:
-         output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
-         output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
-         use_cache = use_cache if use_cache is not None else self.config.use_cache
-
-         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
-
-         # Retrieve input_ids and inputs_embeds
-         if input_ids is not None and inputs_embeds is not None:
-             raise ValueError(
-                 "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
-             )
-         elif input_ids is not None:
-             batch_size, seq_length = input_ids.shape
-         elif inputs_embeds is not None:
-             batch_size, seq_length, _ = inputs_embeds.shape
-         else:
-             raise ValueError(
-                 "You have to specify either decoder_input_ids or decoder_inputs_embeds"
-             )
-
-         seq_length_with_past = seq_length
-         past_key_values_length = 0
-
-         if position_ids is None:
-             device = input_ids.device if input_ids is not None else inputs_embeds.device
-             position_ids = torch.arange(
-                 past_key_values_length,
-                 seq_length + past_key_values_length,
-                 dtype=torch.long,
-                 device=device,
-             )
-             position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
-         else:
-             position_ids = position_ids.view(-1, seq_length).long()
-
-         if inputs_embeds is None:
-             inputs_embeds = self.embed_tokens(input_ids)
-         # Embed positions
-         if self._use_flash_attention_2:
-             # 2d mask is passed through the layers
-             attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
-         else:
-             if attention_mask is None:
-                 attention_mask = torch.ones(
-                     (batch_size, seq_length_with_past),
-                     dtype=torch.bool,
-                     device=inputs_embeds.device,
-                 )
-             attention_mask = self._prepare_decoder_attention_mask(
-                 attention_mask,
-                 (batch_size, seq_length),
-                 inputs_embeds,
-                 past_key_values_length,
-             )
-
-         hidden_states = inputs_embeds
-
-         if self.gradient_checkpointing and self.training:
-             if use_cache:
-                 logger.warning(
-                     "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
-                 )
-                 use_cache = False
-
-         # Decoder layers
-         all_hidden_states = () if output_hidden_states else None
-         all_self_attns = () if output_attentions else None
-         next_decoder_cache = () if use_cache else None
-
-         for idx, decoder_layer in enumerate(self.layers):
-             if output_hidden_states:
-                 all_hidden_states += (hidden_states,)
-
-             past_key_value = (
-                 past_key_values[idx] if past_key_values is not None else None
-             )
-
-             if self.gradient_checkpointing and self.training:
-
-                 def create_custom_forward(module):
-                     def custom_forward(*inputs):
-                         # None for past_key_value
-                         return module(*inputs, past_key_value, output_attentions)
-
-                     return custom_forward
-
-                 layer_outputs = torch.utils.checkpoint.checkpoint(
-                     create_custom_forward(decoder_layer),
-                     hidden_states,
-                     attention_mask,
-                     position_ids,
-                 )
-             else:
-                 layer_outputs = decoder_layer(
-                     hidden_states,
-                     attention_mask=attention_mask,
-                     position_ids=position_ids,
-                     past_key_value=past_key_value,
-                     output_attentions=output_attentions,
-                     use_cache=use_cache,
-                 )
-
-             hidden_states = layer_outputs[0]
-
-             if use_cache:
-                 next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
-
-             if output_attentions:
-                 all_self_attns += (layer_outputs[1],)
-
-         hidden_states = self.norm(hidden_states)
-
-         # Add hidden states from the last decoder layer
-         if output_hidden_states:
-             all_hidden_states += (hidden_states,)
-
-         next_cache = next_decoder_cache if use_cache else None
-         if not return_dict:
-             return tuple(
-                 v
-                 for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
-                 if v is not None
-             )
-         return BaseModelOutputWithPast(
-             last_hidden_state=hidden_states,
-             past_key_values=next_cache,
-             hidden_states=all_hidden_states,
-             attentions=all_self_attns,
-         )
-
-
- class StableLMEpochForCausalLM(StableLMEpochPreTrainedModel):
-     _tied_weights_keys = ["lm_head.weight"]
-
-     def __init__(self, config: StableLMEpochConfig):
-         super().__init__(config)
-
-         self.model = StableLMEpochModel(config)
-         self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
-
-         # Initialize weights and apply final processing
-         self.post_init()
-
-     def get_input_embeddings(self):
-         return self.model.embed_tokens
-
-     def set_input_embeddings(self, value):
-         self.model.embed_tokens = value
-
-     def get_output_embeddings(self):
-         return self.lm_head
-
-     def set_output_embeddings(self, new_embeddings: nn.Module):
-         self.lm_head = new_embeddings
-
-     def get_decoder(self):
-         return self.model
-
-     def set_decoder(self, decoder):
-         self.model = decoder
-
-     def forward(
-         self,
-         input_ids: Optional[torch.LongTensor] = None,
-         attention_mask: Optional[torch.FloatTensor] = None,
-         position_ids: Optional[torch.LongTensor] = None,
-         past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
-         inputs_embeds: Optional[torch.FloatTensor] = None,
-         labels: Optional[torch.LongTensor] = None,
-         use_cache: Optional[bool] = None,
-         output_attentions: Optional[bool] = None,
-         output_hidden_states: Optional[bool] = None,
-         return_dict: Optional[bool] = None,
-     ) -> Union[Tuple, CausalLMOutputWithPast]:
-         output_attentions = (
-             output_attentions
-             if output_attentions is not None
-             else self.config.output_attentions
-         )
-         output_hidden_states = (
-             output_hidden_states
-             if output_hidden_states is not None
-             else self.config.output_hidden_states
-         )
-         return_dict = (
-             return_dict if return_dict is not None else self.config.use_return_dict
-         )
-
-         # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
-         outputs = self.model(
-             input_ids,
-             attention_mask=attention_mask,
-             position_ids=position_ids,
-             past_key_values=past_key_values,
-             inputs_embeds=inputs_embeds,
-             use_cache=use_cache,
-             output_attentions=output_attentions,
-             output_hidden_states=output_hidden_states,
-             return_dict=return_dict,
-         )
-
-         hidden_states = outputs[0]
-         logits = self.lm_head(hidden_states).float()
-
-         loss = None
-         if labels is not None:
-             # Shift so that tokens < n predict n
-             shift_logits = logits[..., :-1, :].contiguous()
-             shift_labels = labels[..., 1:].contiguous()
-             # Flatten the tokens
-             loss_fct = CrossEntropyLoss()
-             shift_logits = shift_logits.view(-1, self.config.vocab_size)
-             shift_labels = shift_labels.view(-1)
-             # Enable model parallelism
-             shift_labels = shift_labels.to(shift_logits.device)
-             loss = loss_fct(shift_logits, shift_labels)
-
-         if not return_dict:
-             output = (logits,) + outputs[1:]
-             return (loss,) + output if loss is not None else output
-
-         return CausalLMOutputWithPast(
-             loss=loss,
-             logits=logits,
-             past_key_values=outputs.past_key_values,
-             hidden_states=outputs.hidden_states,
-             attentions=outputs.attentions,
-         )
-
-     def prepare_inputs_for_generation(
-         self,
-         input_ids,
-         past_key_values: Optional[torch.Tensor] = None,
-         attention_mask: Optional[torch.Tensor] = None,
-         inputs_embeds: Optional[torch.Tensor] = None,
-         **kwargs,
-     ):
-         # Trim decoder_input_ids if past is used
-         if past_key_values is not None:
-             past_length = past_key_values[0][0].shape[2]
-
-             # Some generation methods already pass only the last input ID
-             if input_ids.shape[1] > past_length:
-                 remove_prefix_length = past_length
-             else:
-                 # Default to old behavior: keep only final ID
-                 remove_prefix_length = input_ids.shape[1] - 1
-
-             input_ids = input_ids[:, remove_prefix_length:]
-
-         position_ids = kwargs.get("position_ids", None)
-         if attention_mask is not None and position_ids is None:
-             # Create position_ids on the fly for batch generation
-             position_ids = attention_mask.long().cumsum(-1) - 1
-             position_ids.masked_fill_(attention_mask == 0, 1)
-             if past_key_values:
-                 position_ids = position_ids[:, -1].unsqueeze(-1)
-
-         # If `inputs_embeds` are passed, we only want to use them in the 1st generation step
-         if inputs_embeds is not None and past_key_values is None:
-             model_inputs = {"inputs_embeds": inputs_embeds}
-         else:
-             model_inputs = {"input_ids": input_ids}
-
-         model_inputs.update(
-             {
-                 "attention_mask": attention_mask,
-                 "past_key_values": past_key_values,
-                 "use_cache": kwargs.get("use_cache"),
-                 "position_ids": position_ids,
-             }
-         )
-         return model_inputs
-
-     @staticmethod
-     def _reorder_cache(past_key_values, beam_idx):
-         reordered_past = ()
-         for layer_past in past_key_values:
-             reordered_past += (
-                 tuple(
-                     past_state.index_select(0, beam_idx.to(past_state.device))
-                     for past_state in layer_past
-                 ),
-             )
-         return reordered_past
-
-
- StableLMEpochConfig.register_for_auto_class()
- StableLMEpochForCausalLM.register_for_auto_class("AutoModelForCausalLM")
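
For context: the deleted module registered StableLMEpochConfig and StableLMEpochForCausalLM with the transformers auto classes, so checkpoints that shipped this file were loaded as remote code. Below is a minimal sketch of that loading path; the repository id is a hypothetical placeholder, not taken from this commit.

from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "some-user/some-stablelm-epoch-checkpoint"  # hypothetical placeholder repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
# trust_remote_code=True lets transformers import modeling_stablelm_epoch.py shipped with the checkpoint
model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)

inputs = tokenizer("Hello, world", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

After this deletion, such a load would only succeed if the checkpoint's auto_map still points at a copy of the modeling code hosted elsewhere.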