zR committed
Commit fb328f2 (1 parent: 04e0173)

support transformers 4.44

Files changed (1)
  1. modeling_chatglm.py +1141 -0
modeling_chatglm.py ADDED
@@ -0,0 +1,1141 @@
1
+ """ PyTorch ChatGLM model. """
2
+ import json
3
+ import math
4
+ import copy
5
+ import warnings
6
+ import re
7
+ import sys
8
+
9
+ import torch
10
+ import torch.utils.checkpoint
11
+ import torch.nn.functional as F
12
+ from torch import nn
13
+ from torch.nn import CrossEntropyLoss, LayerNorm, MSELoss, BCEWithLogitsLoss
14
+ from torch.nn.utils import skip_init
15
+ from typing import Optional, Tuple, Union, List, Callable, Dict, Any
16
+ from copy import deepcopy
17
+
18
+ from transformers.modeling_outputs import (
19
+ BaseModelOutputWithPast,
20
+ CausalLMOutputWithPast,
21
+ SequenceClassifierOutputWithPast,
22
+ )
23
+ from transformers.modeling_utils import PreTrainedModel
24
+ from transformers.utils import logging, is_torch_npu_available
25
+ from transformers.generation.logits_process import LogitsProcessor
26
+ from transformers.generation.utils import LogitsProcessorList, StoppingCriteriaList, GenerationConfig, ModelOutput
27
+
28
+ from .configuration_chatglm import ChatGLMConfig
29
+
30
+ try:
31
+ from transformers.utils import is_flash_attn_greater_or_equal_2_10, is_flash_attn_2_available
32
+ if is_flash_attn_2_available():
33
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
34
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input # noqa
35
 + except Exception:
36
+ pass
37
+
38
+
39
+ # flags required to enable jit fusion kernels
40
+
41
+ if sys.platform != 'darwin' and not is_torch_npu_available():
42
+ torch._C._jit_set_profiling_mode(False)
43
+ torch._C._jit_set_profiling_executor(False)
44
+ torch._C._jit_override_can_fuse_on_cpu(True)
45
+ torch._C._jit_override_can_fuse_on_gpu(True)
46
+
47
+ logger = logging.get_logger(__name__)
48
+
49
+ _CHECKPOINT_FOR_DOC = "THUDM/ChatGLM"
50
+ _CONFIG_FOR_DOC = "ChatGLMConfig"
51
+
52
+
53
+ def default_init(cls, *args, **kwargs):
54
+ return cls(*args, **kwargs)
55
+
56
+
57
+ class InvalidScoreLogitsProcessor(LogitsProcessor):
58
+ def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor:
59
+ if torch.isnan(scores).any() or torch.isinf(scores).any():
60
+ scores.zero_()
61
+ scores[..., 198] = 5e4
62
+ return scores
63
+
64
+
65
+ def split_tensor_along_last_dim(
66
+ tensor: torch.Tensor,
67
+ num_partitions: int,
68
+ contiguous_split_chunks: bool = False,
69
+ ) -> List[torch.Tensor]:
70
+ """Split a tensor along its last dimension.
71
+
72
+ Arguments:
73
+ tensor: input tensor.
74
+ num_partitions: number of partitions to split the tensor
75
+ contiguous_split_chunks: If True, make each chunk contiguous
76
+ in memory.
77
+
78
+ Returns:
79
+ A list of Tensors
80
+ """
81
+ # Get the size and dimension.
82
+ last_dim = tensor.dim() - 1
83
+ last_dim_size = tensor.size()[last_dim] // num_partitions
84
+ # Split.
85
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
86
+ # Note: torch.split does not create contiguous tensors by default.
87
+ if contiguous_split_chunks:
88
+ return tuple(chunk.contiguous() for chunk in tensor_list)
89
+
90
+ return tensor_list
91
+
92
+
93
+ class RotaryEmbedding(nn.Module):
94
+ def __init__(self, dim, rope_ratio=1, original_impl=False, device=None, dtype=None):
95
+ super().__init__()
96
+ inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, device=device).to(dtype=dtype) / dim))
97
+ self.register_buffer("inv_freq", inv_freq)
98
+ self.dim = dim
99
+ self.original_impl = original_impl
100
+ self.rope_ratio = rope_ratio
101
+
102
+ def forward_impl(
103
+ self, seq_len: int, n_elem: int, dtype: torch.dtype, device: torch.device, base: int = 10000
104
+ ):
105
+ """Enhanced Transformer with Rotary Position Embedding.
106
+
107
+ Derived from: https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/labml_nn/
108
+ transformers/rope/__init__.py. MIT License:
109
+ https://github.com/labmlai/annotated_deep_learning_paper_implementations/blob/master/license.
110
+ """
111
+ # $\Theta = {\theta_i = 10000^{\frac{2(i-1)}{d}}, i \in [1, 2, ..., \frac{d}{2}]}$
112
+ base = base * self.rope_ratio
113
+ theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=torch.float, device=device) / n_elem))
114
+
115
+ # Create position indexes `[0, 1, ..., seq_len - 1]`
116
+ seq_idx = torch.arange(seq_len, dtype=torch.float, device=device)
117
+
118
+ # Calculate the product of position index and $\theta_i$
119
+ idx_theta = torch.outer(seq_idx, theta).float()
120
+
121
+ cache = torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)
122
+
123
+ # this is to mimic the behaviour of complex32, else we will get different results
124
+ if dtype in (torch.float16, torch.bfloat16, torch.int8):
125
+ cache = cache.bfloat16() if dtype == torch.bfloat16 else cache.half()
126
+ return cache
127
+
128
+ def forward(self, max_seq_len, offset=0):
129
+ return self.forward_impl(
130
+ max_seq_len, self.dim, dtype=self.inv_freq.dtype, device=self.inv_freq.device
131
+ )
132
+
133
+
134
+ @torch.jit.script
135
+ def apply_rotary_pos_emb(x: torch.Tensor, rope_cache: torch.Tensor) -> torch.Tensor:
136
+ # x: [b, np, sq, hn]
137
+ b, np, sq, hn = x.size(0), x.size(1), x.size(2), x.size(3)
138
+ rot_dim = rope_cache.shape[-2] * 2
139
+ x, x_pass = x[..., :rot_dim], x[..., rot_dim:]
140
+ # truncate to support variable sizes
141
+ rope_cache = rope_cache[:, :sq]
142
+ xshaped = x.reshape(b, np, sq, rot_dim // 2, 2)
143
+ rope_cache = rope_cache.view(-1, 1, sq, xshaped.size(3), 2)
144
+ x_out2 = torch.stack(
145
+ [
146
+ xshaped[..., 0] * rope_cache[..., 0] - xshaped[..., 1] * rope_cache[..., 1],
147
+ xshaped[..., 1] * rope_cache[..., 0] + xshaped[..., 0] * rope_cache[..., 1],
148
+ ],
149
+ -1,
150
+ )
151
+ x_out2 = x_out2.flatten(3)
152
+ return torch.cat((x_out2, x_pass), dim=-1)
153
+
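
The rotary cache built by `RotaryEmbedding.forward_impl` stores (cos, sin) pairs for only `dim // 2` frequencies, and `apply_rotary_pos_emb` rotates just that leading slice of each head, passing the remaining channels through. Below is a minimal standalone sketch of the same pairing logic (without the `@torch.jit.script` wrapper), useful for checking shapes outside the model; tensor sizes are illustrative only.

```python
import torch

def build_rope_cache(seq_len: int, n_elem: int, base: float = 10000.0) -> torch.Tensor:
    # [seq_len, n_elem // 2, 2] holding (cos, sin) per position and frequency
    theta = 1.0 / (base ** (torch.arange(0, n_elem, 2, dtype=torch.float) / n_elem))
    idx_theta = torch.outer(torch.arange(seq_len, dtype=torch.float), theta)
    return torch.stack([torch.cos(idx_theta), torch.sin(idx_theta)], dim=-1)

def apply_rope(x: torch.Tensor, cache: torch.Tensor) -> torch.Tensor:
    # x: [b, np, sq, hn]; only the first 2 * cache.size(-2) channels are rotated
    b, np_, sq, _ = x.shape
    rot_dim = cache.shape[-2] * 2
    x_rot, x_pass = x[..., :rot_dim], x[..., rot_dim:]
    x_rot = x_rot.reshape(b, np_, sq, rot_dim // 2, 2)
    cos, sin = cache[:sq, :, 0], cache[:sq, :, 1]          # [sq, rot_dim // 2]
    out = torch.stack(
        [x_rot[..., 0] * cos - x_rot[..., 1] * sin,
         x_rot[..., 0] * sin + x_rot[..., 1] * cos], dim=-1
    ).flatten(3)
    return torch.cat([out, x_pass], dim=-1)

# rotate half of a 128-dim head, as the model does via RotaryEmbedding(rotary_dim // 2)
x = torch.randn(1, 2, 16, 128)
cache = build_rope_cache(seq_len=16, n_elem=64)
print(apply_rope(x, cache).shape)  # torch.Size([1, 2, 16, 128])
```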
154
+
155
+ class RMSNorm(torch.nn.Module):
156
+ def __init__(self, normalized_shape, eps=1e-5, device=None, dtype=None, **kwargs):
157
+ super().__init__()
158
+ self.weight = torch.nn.Parameter(torch.empty(normalized_shape, device=device, dtype=dtype))
159
+ self.eps = eps
160
+
161
+ def forward(self, hidden_states: torch.Tensor):
162
+ input_dtype = hidden_states.dtype
163
+ variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
164
+ hidden_states = hidden_states * torch.rsqrt(variance + self.eps)
165
+
166
+ return (self.weight * hidden_states).to(input_dtype)
167
+
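
`RMSNorm` normalizes by the root mean square computed in fp32 and rescales with a learned weight; unlike `LayerNorm` there is no mean subtraction and no bias. A tiny sanity check recomputing the same normalization by hand on fp16 input (values are illustrative):

```python
import torch

torch.manual_seed(0)
x = torch.randn(2, 4, 8, dtype=torch.float16)
weight = torch.ones(8, dtype=torch.float16)
eps = 1e-5

# same computation as RMSNorm.forward: fp32 variance, rsqrt, rescale, cast back
variance = x.to(torch.float32).pow(2).mean(-1, keepdim=True)
y = (weight * (x * torch.rsqrt(variance + eps))).to(x.dtype)

print(y.dtype, y.shape)   # torch.float16 torch.Size([2, 4, 8])
print(y.pow(2).mean(-1))  # approximately 1 everywhere when weight == 1
```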
168
+
169
+ class CoreAttention(torch.nn.Module):
170
+ def __init__(self, config: ChatGLMConfig, layer_number):
171
+ super(CoreAttention, self).__init__()
172
+ self.config = config
173
+ self.apply_query_key_layer_scaling = config.apply_query_key_layer_scaling
174
+ self.attention_softmax_in_fp32 = config.attention_softmax_in_fp32
175
+ if self.apply_query_key_layer_scaling:
176
+ self.attention_softmax_in_fp32 = True
177
+ self.layer_number = max(1, layer_number)
178
+ self.is_causal = True
179
+
180
+ projection_size = config.kv_channels * config.num_attention_heads
181
+
182
+ # Per attention head and per partition values.
183
+ self.hidden_size_per_partition = projection_size
184
+ self.hidden_size_per_attention_head = projection_size // config.num_attention_heads
185
+ self.num_attention_heads_per_partition = config.num_attention_heads
186
+
187
+ coeff = None
188
+ self.norm_factor = math.sqrt(self.hidden_size_per_attention_head)
189
+ if self.apply_query_key_layer_scaling:
190
+ coeff = self.layer_number
191
+ self.norm_factor *= coeff
192
+ self.coeff = coeff
193
+
194
+ self.attention_dropout = torch.nn.Dropout(config.attention_dropout)
195
+
196
+ def forward(self, query_layer, key_layer, value_layer, attention_mask):
197
+ # [b, np, sq, sk]
198
+ output_size = (query_layer.size(0), query_layer.size(1), query_layer.size(2), key_layer.size(2))
199
+
200
+ # [b, np, sq, hn] -> [b * np, sq, hn]
201
+ query_layer = query_layer.view(output_size[0] * output_size[1], output_size[2], -1)
202
+ # [b, np, sk, hn] -> [b * np, sk, hn]
203
+ key_layer = key_layer.view(output_size[0] * output_size[1], output_size[3], -1)
204
+
205
 + # preallocating input tensor: [b * np, sq, sk]
206
+ matmul_input_buffer = torch.empty(
207
+ output_size[0] * output_size[1], output_size[2], output_size[3], dtype=query_layer.dtype,
208
+ device=query_layer.device
209
+ )
210
+
211
+ # Raw attention scores. [b * np, sq, sk]
212
+ matmul_result = torch.baddbmm(
213
+ matmul_input_buffer,
214
+ query_layer, # [b * np, sq, hn]
215
+ key_layer.transpose(1, 2), # [b * np, hn, sk]
216
+ beta=0.0,
217
+ alpha=(1.0 / self.norm_factor),
218
+ )
219
+
220
+ # change view to [b, np, sq, sk]
221
+ attention_scores = matmul_result.view(*output_size)
222
+
223
+ # ===========================
224
+ # Attention probs and dropout
225
+ # ===========================
226
+
227
+ # attention scores and attention mask [b, np, sq, sk]
228
+ if self.attention_softmax_in_fp32:
229
+ attention_scores = attention_scores.float()
230
+ if self.coeff is not None:
231
+ attention_scores = attention_scores * self.coeff
232
+ if attention_mask is None and attention_scores.shape[2] == attention_scores.shape[3]:
233
+ attention_mask = torch.ones(output_size[0], 1, output_size[2], output_size[3],
234
+ device=attention_scores.device, dtype=torch.bool)
235
+ attention_mask.tril_()
236
+ attention_mask = ~attention_mask
237
+ if attention_mask is not None:
238
+ attention_scores = attention_scores.masked_fill(attention_mask, float("-inf"))
239
+ attention_probs = F.softmax(attention_scores, dim=-1)
240
+ attention_probs = attention_probs.type_as(value_layer)
241
+
242
+ # This is actually dropping out entire tokens to attend to, which might
243
+ # seem a bit unusual, but is taken from the original Transformer paper.
244
+ attention_probs = self.attention_dropout(attention_probs)
245
+
246
+ # query layer shape: [b * np, sq, hn]
247
+ # value layer shape: [b, np, sk, hn]
248
+ # attention shape: [b, np, sq, sk]
249
+ # context layer shape: [b, np, sq, hn]
250
+ output_size = (value_layer.size(0), value_layer.size(1), query_layer.size(1), value_layer.size(3))
251
+ # change view [b * np, sk, hn]
252
+ value_layer = value_layer.view(output_size[0] * output_size[1], value_layer.size(2), -1)
253
+ # change view [b * np, sq, sk]
254
+ attention_probs = attention_probs.view(output_size[0] * output_size[1], output_size[2], -1)
255
+ # matmul: [b * np, sq, hn]
256
+ context_layer = torch.bmm(attention_probs, value_layer)
257
+ # change view [b, np, sq, hn]
258
+ context_layer = context_layer.view(*output_size)
259
+ # [b, np, sq, hn] --> [b, sq, np, hn]
260
+ context_layer = context_layer.transpose(1, 2).contiguous()
261
+ # [b, sq, np, hn] --> [b, sq, hp]
262
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
263
+ context_layer = context_layer.reshape(*new_context_layer_shape)
264
+
265
+ return context_layer
266
+
267
+
268
+ class SdpaAttention(CoreAttention):
269
+ def forward(self, query_layer, key_layer, value_layer, attention_mask):
270
+ if attention_mask is None and query_layer.shape[2] == key_layer.shape[2]:
271
+ context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
272
+ is_causal=True,
273
+ dropout_p=self.config.attention_dropout if self.training else 0.0)
274
+ else:
275
+ if attention_mask is not None:
276
+ attention_mask = ~attention_mask
277
+ context_layer = torch.nn.functional.scaled_dot_product_attention(query_layer, key_layer, value_layer,
278
+ attention_mask,
279
+ dropout_p=self.config.attention_dropout if self.training else 0.0)
280
+ context_layer = context_layer.transpose(1, 2).contiguous()
281
+ new_context_layer_shape = context_layer.size()[:-2] + (self.hidden_size_per_partition,)
282
+ context_layer = context_layer.reshape(*new_context_layer_shape)
283
+ return context_layer
284
+
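
Note the mask convention: `get_masks` (further below) returns a boolean mask in which `True` marks positions that must NOT be attended to, while `torch.nn.functional.scaled_dot_product_attention` expects a boolean `attn_mask` in which `True` means "attend". That is why `SdpaAttention` flips the mask with `~attention_mask`. A small illustration of the two conventions, assuming a single-token query over 4 keys where the last key is padding:

```python
import torch
import torch.nn.functional as F

q = torch.randn(1, 1, 1, 8)   # [batch, heads, q_len, head_dim]
k = torch.randn(1, 1, 4, 8)
v = torch.randn(1, 1, 4, 8)

# model-style mask: True = blocked (here the last key position is padding)
blocked = torch.tensor([[[[False, False, False, True]]]])

out_sdpa = F.scaled_dot_product_attention(q, k, v, attn_mask=~blocked)

# equivalent eager computation: fill blocked scores with -inf before softmax
scores = (q @ k.transpose(-1, -2)) / (8 ** 0.5)
scores = scores.masked_fill(blocked, float("-inf"))
out_eager = torch.softmax(scores, dim=-1) @ v

print(torch.allclose(out_sdpa, out_eager, atol=1e-6))  # True
```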
285
+
286
+ def _get_unpad_data(attention_mask):
287
+ seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
288
+ indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
289
+ max_seqlen_in_batch = seqlens_in_batch.max().item()
290
+ cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
291
+ return (
292
+ indices,
293
+ cu_seqlens,
294
+ max_seqlen_in_batch,
295
+ )
296
+
297
+
298
+ # Copied from transformers.models.llama.modeling_llama.LlamaFlashAttention2
299
+ class FlashAttention2(CoreAttention):
300
+ def __init__(self, *args, **kwargs):
301
+ super().__init__(*args, **kwargs)
302
+ self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
303
+
304
+ def forward(self, query_states, key_states, value_states, attention_mask):
305
+ query_states = query_states.transpose(1, 2)
306
+ key_states = key_states.transpose(1, 2)
307
+ value_states = value_states.transpose(1, 2)
308
+ batch_size, query_length = query_states.shape[:2]
309
+ if not self._flash_attn_uses_top_left_mask:
310
+ causal = self.is_causal
311
+ else:
312
+ # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in LlamaFlashAttention2 __init__.
313
+ causal = self.is_causal and query_length != 1
314
+ dropout = self.config.attention_dropout if self.training else 0.0
315
+ # Contains at least one padding token in the sequence
316
+ if attention_mask is not None:
317
+ query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
318
+ query_states, key_states, value_states, attention_mask, query_length
319
+ )
320
+
321
+ cu_seqlens_q, cu_seqlens_k = cu_seq_lens
322
+ max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
323
+
324
+ attn_output_unpad = flash_attn_varlen_func(
325
+ query_states,
326
+ key_states,
327
+ value_states,
328
+ cu_seqlens_q=cu_seqlens_q,
329
+ cu_seqlens_k=cu_seqlens_k,
330
+ max_seqlen_q=max_seqlen_in_batch_q,
331
+ max_seqlen_k=max_seqlen_in_batch_k,
332
+ dropout_p=dropout,
333
+ softmax_scale=None,
334
+ causal=causal,
335
+ )
336
+
337
+ attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
338
+ else:
339
+ attn_output = flash_attn_func(
340
+ query_states, key_states, value_states, dropout, softmax_scale=None, causal=causal
341
+ )
342
+ attn_output = attn_output.reshape(batch_size, query_length, self.hidden_size_per_partition).contiguous()
343
+ return attn_output
344
+
345
+ def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
346
+ indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
347
+ batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
348
+
349
+ key_layer = index_first_axis(
350
+ key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
351
+ )
352
+ value_layer = index_first_axis(
353
+ value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
354
+ )
355
+ if query_length == kv_seq_len:
356
+ query_layer = index_first_axis(
357
+ query_layer.reshape(batch_size * kv_seq_len, self.num_attention_heads_per_partition, head_dim), indices_k
358
+ )
359
+ cu_seqlens_q = cu_seqlens_k
360
+ max_seqlen_in_batch_q = max_seqlen_in_batch_k
361
+ indices_q = indices_k
362
+ elif query_length == 1:
363
+ max_seqlen_in_batch_q = 1
364
+ cu_seqlens_q = torch.arange(
365
+ batch_size + 1, dtype=torch.int32, device=query_layer.device
366
+ ) # There is a memcpy here, that is very bad.
367
+ indices_q = cu_seqlens_q[:-1]
368
+ query_layer = query_layer.squeeze(1)
369
+ else:
370
+ # The -q_len: slice assumes left padding.
371
+ attention_mask = attention_mask[:, -query_length:]
372
+ query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
373
+
374
+ return (
375
+ query_layer,
376
+ key_layer,
377
+ value_layer,
378
+ indices_q,
379
+ (cu_seqlens_q, cu_seqlens_k),
380
+ (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
381
+ )
382
+
383
+
384
+ CORE_ATTENTION_CLASSES = {
385
+ "eager": CoreAttention,
386
+ "sdpa": SdpaAttention,
387
+ "flash_attention_2": FlashAttention2
388
+ }
389
+
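
`CORE_ATTENTION_CLASSES` is keyed by `config._attn_implementation`, which `transformers` resolves from the `attn_implementation` argument passed to `from_pretrained`. A hedged loading sketch; the repo id is only an example of a checkpoint that ships this modeling file, and `trust_remote_code=True` is required because the class lives in the repository rather than in `transformers` itself:

```python
import torch
from transformers import AutoModelForCausalLM

# "THUDM/glm-4-9b-chat" is used here purely as an example repo id
model = AutoModelForCausalLM.from_pretrained(
    "THUDM/glm-4-9b-chat",
    torch_dtype=torch.bfloat16,
    attn_implementation="sdpa",   # or "eager" / "flash_attention_2"
    trust_remote_code=True,
    device_map="auto",
)
print(model.config._attn_implementation)  # "sdpa"
```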
390
+
391
+ class SelfAttention(torch.nn.Module):
392
+ """Parallel self-attention layer abstract class.
393
+
394
 + Self-attention layer takes input with size [b, s, h]
395
+ and returns output of the same size.
396
+ """
397
+
398
+ def __init__(self, config: ChatGLMConfig, layer_number, device=None):
399
+ super(SelfAttention, self).__init__()
400
+ self.layer_number = max(1, layer_number)
401
+
402
+ self.projection_size = config.kv_channels * config.num_attention_heads
403
+
404
+ # Per attention head and per partition values.
405
+ self.hidden_size_per_attention_head = self.projection_size // config.num_attention_heads
406
+ self.num_attention_heads_per_partition = config.num_attention_heads
407
+
408
+ self.multi_query_attention = config.multi_query_attention
409
+ self.qkv_hidden_size = 3 * self.projection_size
410
+ if self.multi_query_attention:
411
+ self.num_multi_query_groups_per_partition = config.multi_query_group_num
412
+ self.qkv_hidden_size = (
413
+ self.projection_size + 2 * self.hidden_size_per_attention_head * config.multi_query_group_num
414
+ )
415
+ self.query_key_value = nn.Linear(config.hidden_size, self.qkv_hidden_size,
416
+ bias=config.add_bias_linear or config.add_qkv_bias,
417
+ device=device, **_config_to_kwargs(config)
418
+ )
419
+
420
+ self.core_attention = CORE_ATTENTION_CLASSES[config._attn_implementation](config, self.layer_number)
421
+
422
+ # Output.
423
+ self.dense = nn.Linear(self.projection_size, config.hidden_size, bias=config.add_bias_linear,
424
+ device=device, **_config_to_kwargs(config)
425
+ )
426
+
427
+ def _allocate_memory(self, inference_max_sequence_len, batch_size, device=None, dtype=None):
428
+ if self.multi_query_attention:
429
+ num_attention_heads = self.num_multi_query_groups_per_partition
430
+ else:
431
+ num_attention_heads = self.num_attention_heads_per_partition
432
+ return torch.empty(
433
+ inference_max_sequence_len,
434
+ batch_size,
435
+ num_attention_heads,
436
+ self.hidden_size_per_attention_head,
437
+ dtype=dtype,
438
+ device=device,
439
+ )
440
+
441
+ def forward(
442
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True
443
+ ):
444
+ # hidden_states: [b, sq, h]
445
+
446
+ # =================================================
447
+ # Pre-allocate memory for key-values for inference.
448
+ # =================================================
449
+ # =====================
450
+ # Query, Key, and Value
451
+ # =====================
452
+
453
+ # Attention heads [b, sq, h] --> [b, sq, (np * 3 * hn)]
454
+ mixed_x_layer = self.query_key_value(hidden_states)
455
+
456
+ if self.multi_query_attention:
457
+ (query_layer, key_layer, value_layer) = mixed_x_layer.split(
458
+ [
459
+ self.num_attention_heads_per_partition * self.hidden_size_per_attention_head,
460
+ self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
461
+ self.num_multi_query_groups_per_partition * self.hidden_size_per_attention_head,
462
+ ],
463
+ dim=-1,
464
+ )
465
+ query_layer = query_layer.view(
466
+ query_layer.size()[:-1] + (self.num_attention_heads_per_partition, self.hidden_size_per_attention_head)
467
+ )
468
+ key_layer = key_layer.view(
469
+ key_layer.size()[:-1] + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
470
+ )
471
+ value_layer = value_layer.view(
472
+ value_layer.size()[:-1]
473
+ + (self.num_multi_query_groups_per_partition, self.hidden_size_per_attention_head)
474
+ )
475
+ else:
476
+ new_tensor_shape = mixed_x_layer.size()[:-1] + \
477
+ (self.num_attention_heads_per_partition,
478
+ 3 * self.hidden_size_per_attention_head)
479
+ mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
480
+
481
+ # [b, sq, np, 3 * hn] --> 3 [b, sq, np, hn]
482
+ (query_layer, key_layer, value_layer) = split_tensor_along_last_dim(mixed_x_layer, 3)
483
+
484
+ # [b, sq, np, hn] -> [b, np, sq, hn]
485
+ query_layer, key_layer, value_layer = [k.transpose(1, 2) for k in [query_layer, key_layer, value_layer]]
486
+
487
+ # apply relative positional encoding (rotary embedding)
488
+ if rotary_pos_emb is not None:
489
+ query_layer = apply_rotary_pos_emb(query_layer, rotary_pos_emb)
490
+ key_layer = apply_rotary_pos_emb(key_layer, rotary_pos_emb)
491
+
492
+ # adjust key and value for inference
493
+ if kv_cache is not None:
494
+ cache_k, cache_v = kv_cache
495
+ key_layer = torch.cat((cache_k, key_layer), dim=2)
496
+ value_layer = torch.cat((cache_v, value_layer), dim=2)
497
+ if use_cache:
498
+ if kv_cache is None:
499
+ kv_cache = torch.cat((key_layer.unsqueeze(0).unsqueeze(0), value_layer.unsqueeze(0).unsqueeze(0)),
500
+ dim=1)
501
+ else:
502
+ kv_cache = (key_layer, value_layer)
503
+ else:
504
+ kv_cache = None
505
+
506
+ if self.multi_query_attention:
507
+ key_layer = key_layer.unsqueeze(2)
508
+ key_layer = key_layer.expand(
509
+ -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1
510
+ )
511
+ key_layer = key_layer.contiguous().view(
512
+ key_layer.size()[:1] + (self.num_attention_heads_per_partition,) + key_layer.size()[3:]
513
+ )
514
+ value_layer = value_layer.unsqueeze(2)
515
+ value_layer = value_layer.expand(
516
+ -1, -1, self.num_attention_heads_per_partition // self.num_multi_query_groups_per_partition, -1, -1
517
+ )
518
+ value_layer = value_layer.contiguous().view(
519
+ value_layer.size()[:1] + (self.num_attention_heads_per_partition,) + value_layer.size()[3:]
520
+ )
521
+
522
+ # ==================================
523
+ # core attention computation
524
+ # ==================================
525
+
526
+ context_layer = self.core_attention(query_layer, key_layer, value_layer, attention_mask)
527
+
528
+ # =================
529
 + # Output. [b, sq, h]
530
+ # =================
531
+
532
+ output = self.dense(context_layer)
533
+
534
+ return output, kv_cache
535
+
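
With `multi_query_attention` enabled, the QKV projection produces `num_attention_heads` query heads but only `multi_query_group_num` key/value heads; before the core attention call, each KV head is broadcast across its group of query heads. A standalone sketch of that expand-and-reshape step (equivalent to `repeat_interleave`, though the `expand` path defers materializing copies until `.contiguous()`); the sizes below are illustrative:

```python
import torch

b, sq, hn = 2, 5, 16
num_heads, num_kv_groups = 8, 2          # 4 query heads share each KV head

key = torch.randn(b, num_kv_groups, sq, hn)            # [b, ng, sq, hn]

expanded = (
    key.unsqueeze(2)                                     # [b, ng, 1, sq, hn]
       .expand(-1, -1, num_heads // num_kv_groups, -1, -1)
       .contiguous()
       .view(b, num_heads, sq, hn)                       # [b, np, sq, hn]
)

reference = key.repeat_interleave(num_heads // num_kv_groups, dim=1)
print(torch.equal(expanded, reference))                  # True
```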
536
+
537
+ def _config_to_kwargs(args):
538
+ common_kwargs = {
539
+ "dtype": args.torch_dtype,
540
+ }
541
+ return common_kwargs
542
+
543
+
544
+ class MLP(torch.nn.Module):
545
+ """MLP.
546
+
547
+ MLP will take the input with h hidden state, project it to 4*h
548
+ hidden dimension, perform nonlinear transformation, and project the
549
+ state back into h hidden dimension.
550
+ """
551
+
552
+ def __init__(self, config: ChatGLMConfig, device=None):
553
+ super(MLP, self).__init__()
554
+
555
+ self.add_bias = config.add_bias_linear
556
+
557
+ # Project to 4h. If using swiglu double the output width, see https://arxiv.org/pdf/2002.05202.pdf
558
+ self.dense_h_to_4h = nn.Linear(
559
+ config.hidden_size,
560
+ config.ffn_hidden_size * 2,
561
+ bias=self.add_bias,
562
+ device=device,
563
+ **_config_to_kwargs(config)
564
+ )
565
+
566
+ def swiglu(x):
567
+ x = torch.chunk(x, 2, dim=-1)
568
+ return F.silu(x[0]) * x[1]
569
+
570
+ self.activation_func = swiglu
571
+
572
+ # Project back to h.
573
+ self.dense_4h_to_h = nn.Linear(
574
+ config.ffn_hidden_size,
575
+ config.hidden_size,
576
+ bias=self.add_bias,
577
+ device=device,
578
+ **_config_to_kwargs(config)
579
+ )
580
+
581
+ def forward(self, hidden_states):
582
 + # [b, s, 4hp]
583
+ intermediate_parallel = self.dense_h_to_4h(hidden_states)
584
+ intermediate_parallel = self.activation_func(intermediate_parallel)
585
 + # [b, s, h]
586
+ output = self.dense_4h_to_h(intermediate_parallel)
587
+ return output
588
+
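
`dense_h_to_4h` projects to `2 * ffn_hidden_size` so the SwiGLU activation can split its output into a gate half and a value half (see the GLU-variants paper cited in the comment above). A quick numerical check that the fused projection plus `swiglu` matches two separate gate/value projections; the names `w_gate`/`w_up` are illustrative only:

```python
import torch
import torch.nn.functional as F

hidden, ffn = 8, 32
x = torch.randn(3, hidden)

fused = torch.nn.Linear(hidden, 2 * ffn, bias=False)

def swiglu(y):                       # same activation as MLP.forward
    a, b = torch.chunk(y, 2, dim=-1)
    return F.silu(a) * b

# equivalent formulation with two separate projections sharing the fused weights
w_gate, w_up = torch.chunk(fused.weight, 2, dim=0)      # each [ffn, hidden]
separate = F.silu(x @ w_gate.t()) * (x @ w_up.t())

print(torch.allclose(swiglu(fused(x)), separate, atol=1e-6))  # True
```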
589
+
590
+ class GLMBlock(torch.nn.Module):
591
+ """A single transformer layer.
592
+
593
 + Transformer layer takes input with size [b, s, h] and returns an
594
+ output of the same size.
595
+ """
596
+
597
+ def __init__(self, config: ChatGLMConfig, layer_number, device=None):
598
+ super(GLMBlock, self).__init__()
599
+ self.layer_number = layer_number
600
+
601
+ self.apply_residual_connection_post_layernorm = config.apply_residual_connection_post_layernorm
602
+
603
+ self.fp32_residual_connection = config.fp32_residual_connection
604
+
605
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
606
+ # Layernorm on the input data.
607
+ self.input_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
608
+ dtype=config.torch_dtype)
609
+
610
+ # Self attention.
611
+ self.self_attention = SelfAttention(config, layer_number, device=device)
612
+ self.hidden_dropout = config.hidden_dropout
613
+
614
+ # Layernorm on the attention output
615
+ self.post_attention_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
616
+ dtype=config.torch_dtype)
617
+
618
+ # MLP
619
+ self.mlp = MLP(config, device=device)
620
+
621
+ def forward(
622
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_cache=None, use_cache=True,
623
+ ):
624
 + # hidden_states: [b, s, h]
625
+
626
+ # Layer norm at the beginning of the transformer layer.
627
+ layernorm_output = self.input_layernorm(hidden_states)
628
+ # Self attention.
629
+ attention_output, kv_cache = self.self_attention(
630
+ layernorm_output,
631
+ attention_mask,
632
+ rotary_pos_emb,
633
+ kv_cache=kv_cache,
634
+ use_cache=use_cache
635
+ )
636
+
637
+ # Residual connection.
638
+ if self.apply_residual_connection_post_layernorm:
639
+ residual = layernorm_output
640
+ else:
641
+ residual = hidden_states
642
+
643
+ layernorm_input = torch.nn.functional.dropout(attention_output, p=self.hidden_dropout, training=self.training)
644
+ layernorm_input = residual + layernorm_input
645
+
646
+ # Layer norm post the self attention.
647
+ layernorm_output = self.post_attention_layernorm(layernorm_input)
648
+
649
+ # MLP.
650
+ mlp_output = self.mlp(layernorm_output)
651
+
652
+ # Second residual connection.
653
+ if self.apply_residual_connection_post_layernorm:
654
+ residual = layernorm_output
655
+ else:
656
+ residual = layernorm_input
657
+
658
+ output = torch.nn.functional.dropout(mlp_output, p=self.hidden_dropout, training=self.training)
659
+ output = residual + output
660
+
661
+ return output, kv_cache
662
+
663
+
664
+ class GLMTransformer(torch.nn.Module):
665
+ """Transformer class."""
666
+
667
+ def __init__(self, config: ChatGLMConfig, device=None):
668
+ super(GLMTransformer, self).__init__()
669
+
670
+ self.fp32_residual_connection = config.fp32_residual_connection
671
+ self.post_layer_norm = config.post_layer_norm
672
+
673
+ # Number of layers.
674
+ self.num_layers = config.num_layers
675
+
676
+ # Transformer layers.
677
+ def build_layer(layer_number):
678
+ return GLMBlock(config, layer_number, device=device)
679
+
680
+ self.layers = torch.nn.ModuleList([build_layer(i + 1) for i in range(self.num_layers)])
681
+
682
+ if self.post_layer_norm:
683
+ LayerNormFunc = RMSNorm if config.rmsnorm else LayerNorm
684
+ # Final layer norm before output.
685
+ self.final_layernorm = LayerNormFunc(config.hidden_size, eps=config.layernorm_epsilon, device=device,
686
+ dtype=config.torch_dtype)
687
+
688
+ self.gradient_checkpointing = False
689
+
690
+ def _get_layer(self, layer_number):
691
+ return self.layers[layer_number]
692
+
693
+ def forward(
694
+ self, hidden_states, attention_mask, rotary_pos_emb, kv_caches=None,
695
+ use_cache: Optional[bool] = True,
696
+ output_hidden_states: Optional[bool] = False,
697
+ ):
698
+ if not kv_caches:
699
+ kv_caches = [None for _ in range(self.num_layers)]
700
+ presents = () if use_cache else None
701
+ if self.gradient_checkpointing and self.training:
702
+ if use_cache:
703
+ logger.warning_once(
704
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
705
+ )
706
+ use_cache = False
707
+
708
+ all_self_attentions = None
709
+ all_hidden_states = () if output_hidden_states else None
710
+ for index in range(self.num_layers):
711
+ if output_hidden_states:
712
+ all_hidden_states = all_hidden_states + (hidden_states,)
713
+
714
+ layer = self._get_layer(index)
715
+ if self.gradient_checkpointing and self.training:
716
+ layer_ret = torch.utils.checkpoint.checkpoint(
717
+ layer,
718
+ hidden_states,
719
+ attention_mask,
720
+ rotary_pos_emb,
721
+ kv_caches[index],
722
+ use_cache,
723
+ use_reentrant=False
724
+ )
725
+ else:
726
+ layer_ret = layer(
727
+ hidden_states,
728
+ attention_mask,
729
+ rotary_pos_emb,
730
+ kv_cache=kv_caches[index],
731
+ use_cache=use_cache
732
+ )
733
+ hidden_states, kv_cache = layer_ret
734
+ if use_cache:
735
+ # token by token decoding, use tuple format
736
+ if kv_caches[0] is not None:
737
+ presents = presents + (kv_cache,)
738
+ # prefilling in decoding, use tensor format to save cuda memory
739
+ else:
740
+ if len(presents) == 0:
741
+ presents = kv_cache
742
+ else:
743
+ presents = torch.cat((presents, kv_cache.to(presents.device)), dim=0)
744
+
745
+ if output_hidden_states:
746
+ all_hidden_states = all_hidden_states + (hidden_states,)
747
+
748
+ # Final layer norm.
749
+ if self.post_layer_norm:
750
+ hidden_states = self.final_layernorm(hidden_states)
751
+
752
+ return hidden_states, presents, all_hidden_states, all_self_attentions
753
+
754
+
755
+ class ChatGLMPreTrainedModel(PreTrainedModel):
756
+ """
757
+ An abstract class to handle weights initialization and
758
+ a simple interface for downloading and loading pretrained models.
759
+ """
760
+
761
+ is_parallelizable = False
762
+ supports_gradient_checkpointing = True
763
+ config_class = ChatGLMConfig
764
+ base_model_prefix = "transformer"
765
+ _no_split_modules = ["GLMBlock"]
766
+ _supports_flash_attn_2 = True
767
+ _supports_sdpa = True
768
+
769
+ def _init_weights(self, module: nn.Module):
770
+ """Initialize the weights."""
771
+ return
772
+
773
+ def get_masks(self, input_ids, past_key_values, padding_mask=None):
774
+ if self.config._attn_implementation == "flash_attention_2":
775
+ if padding_mask is not None and not padding_mask.all():
776
+ return padding_mask
777
+ return None
778
+ batch_size, seq_length = input_ids.shape
779
+ full_attention_mask = torch.ones(batch_size, seq_length, seq_length, device=input_ids.device)
780
+ full_attention_mask.tril_()
781
+ past_length = 0
782
+ if past_key_values:
783
+ past_length = past_key_values[0][0].shape[2]
784
+ if past_length:
785
+ full_attention_mask = torch.cat((torch.ones(batch_size, seq_length, past_length,
786
+ device=input_ids.device), full_attention_mask), dim=-1)
787
+ if padding_mask is not None:
788
+ full_attention_mask = full_attention_mask * padding_mask.unsqueeze(1)
789
+ if not past_length and padding_mask is not None:
790
+ full_attention_mask -= padding_mask.unsqueeze(-1) - 1
791
+ full_attention_mask = (full_attention_mask < 0.5).bool()
792
+ full_attention_mask.unsqueeze_(1)
793
+ return full_attention_mask
794
+
795
+ def get_position_ids(self, input_ids, device):
796
+ batch_size, seq_length = input_ids.shape
797
+ position_ids = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0).repeat(batch_size, 1)
798
+ return position_ids
799
+
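
`get_masks` combines a lower-triangular causal mask with the padding mask and finally inverts it, so that `True` marks blocked positions (the convention consumed by `CoreAttention` and `SdpaAttention` above). A small prefill-time example with left padding, assuming no past key/values, showing the resulting boolean layout:

```python
import torch

batch_size, seq_length = 1, 4
padding_mask = torch.tensor([[0, 1, 1, 1]])    # first token is padding

full = torch.ones(batch_size, seq_length, seq_length)
full.tril_()                                    # causal part
full = full * padding_mask.unsqueeze(1)         # block attention TO padded keys
full -= padding_mask.unsqueeze(-1) - 1          # padded query rows attend freely (no all -inf rows)
full = (full < 0.5).bool().unsqueeze(1)         # True = masked out, shape [b, 1, sq, sk]

print(full[0, 0].int())
# tensor([[0, 0, 0, 0],
#         [1, 0, 1, 1],
#         [1, 0, 0, 1],
#         [1, 0, 0, 0]], dtype=torch.int32)
```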
800
+ class Embedding(torch.nn.Module):
801
+ """Language model embeddings."""
802
+
803
+ def __init__(self, config: ChatGLMConfig, device=None):
804
+ super(Embedding, self).__init__()
805
+
806
+ self.hidden_size = config.hidden_size
807
+ # Word embeddings (parallel).
808
+ self.word_embeddings = nn.Embedding(
809
+ config.padded_vocab_size,
810
+ self.hidden_size,
811
+ dtype=config.torch_dtype,
812
+ device=device
813
+ )
814
+ self.fp32_residual_connection = config.fp32_residual_connection
815
+
816
+ def forward(self, input_ids):
817
+ # Embeddings.
818
+ words_embeddings = self.word_embeddings(input_ids)
819
+ embeddings = words_embeddings
820
+ # If the input flag for fp32 residual connection is set, convert for float.
821
+ if self.fp32_residual_connection:
822
+ embeddings = embeddings.float()
823
+ return embeddings
824
+
825
+
826
+ class ChatGLMModel(ChatGLMPreTrainedModel):
827
+ def __init__(self, config: ChatGLMConfig, device=None, empty_init=True):
828
+ super().__init__(config)
829
+ if empty_init:
830
+ init_method = skip_init
831
+ else:
832
+ init_method = default_init
833
+ init_kwargs = {}
834
+ if device is not None:
835
+ init_kwargs["device"] = device
836
+ self.embedding = init_method(Embedding, config, **init_kwargs)
837
+ self.num_layers = config.num_layers
838
+ self.multi_query_group_num = config.multi_query_group_num
839
+ self.kv_channels = config.kv_channels
840
+
841
+ # Rotary positional embeddings
842
+ self.seq_length = config.seq_length
843
+ rotary_dim = (
844
+ config.hidden_size // config.num_attention_heads if config.kv_channels is None else config.kv_channels
845
+ )
846
+
847
+ self.rotary_pos_emb = RotaryEmbedding(rotary_dim // 2, rope_ratio=config.rope_ratio,
848
+ original_impl=config.original_rope,
849
+ device=device, dtype=config.torch_dtype)
850
+ self.encoder = init_method(GLMTransformer, config, **init_kwargs)
851
+ self.output_layer = init_method(nn.Linear, config.hidden_size, config.padded_vocab_size, bias=False,
852
+ dtype=config.torch_dtype, **init_kwargs)
853
+
854
+ def get_input_embeddings(self):
855
+ return self.embedding.word_embeddings
856
+
857
+ def set_input_embeddings(self, value):
858
+ self.embedding.word_embeddings = value
859
+
860
+ def forward(
861
+ self,
862
+ input_ids,
863
+ position_ids: Optional[torch.Tensor] = None,
864
+ attention_mask: Optional[torch.BoolTensor] = None,
865
+ full_attention_mask: Optional[torch.BoolTensor] = None,
866
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
867
+ inputs_embeds: Optional[torch.Tensor] = None,
868
+ use_cache: Optional[bool] = None,
869
+ output_attentions: Optional[bool] = None,
870
+ output_hidden_states: Optional[bool] = None,
871
+ return_dict: Optional[bool] = None,
872
+ ):
873
+ output_hidden_states = (
874
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
875
+ )
876
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
877
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
878
+
879
+ batch_size, seq_length = input_ids.shape
880
+
881
+ if inputs_embeds is None:
882
+ inputs_embeds = self.embedding(input_ids)
883
+
884
+ if full_attention_mask is None:
885
+ if (attention_mask is not None and not attention_mask.all()) or (past_key_values and seq_length != 1):
886
+ full_attention_mask = self.get_masks(input_ids, past_key_values, padding_mask=attention_mask)
887
+
888
+ # Rotary positional embeddings
889
+ rotary_pos_emb = self.rotary_pos_emb(self.seq_length)
890
+ if position_ids is not None:
891
+ rotary_pos_emb = rotary_pos_emb[position_ids]
892
+ else:
893
+ rotary_pos_emb = rotary_pos_emb[None, :seq_length]
894
+
895
+ # Run encoder.
896
+ hidden_states, presents, all_hidden_states, all_self_attentions = self.encoder(
897
+ inputs_embeds, full_attention_mask, rotary_pos_emb=rotary_pos_emb,
898
+ kv_caches=past_key_values, use_cache=use_cache, output_hidden_states=output_hidden_states
899
+ )
900
+ if presents is not None and type(presents) is torch.Tensor:
901
+ presents = presents.split(1, dim=0)
902
+ presents = list(presents)
903
+ presents = [list(x.squeeze(0).split(1, dim=0)) for x in presents]
904
+ presents = [tuple([x.squeeze(0) for x in y]) for y in presents]
905
+ presents = tuple(presents)
906
+
907
+ if not return_dict:
908
+ return tuple(v for v in [hidden_states, presents, all_hidden_states, all_self_attentions] if v is not None)
909
+
910
+ return BaseModelOutputWithPast(
911
+ last_hidden_state=hidden_states,
912
+ past_key_values=presents,
913
+ hidden_states=all_hidden_states,
914
+ attentions=all_self_attentions,
915
+ )
916
+
917
+
918
+ class ChatGLMForConditionalGeneration(ChatGLMPreTrainedModel):
919
+ def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
920
+ super().__init__(config)
921
+
922
+ self.max_sequence_length = config.max_length
923
+ self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)
924
+ self.config = config
925
+
926
+ def _update_model_kwargs_for_generation(
927
+ self,
928
+ outputs: ModelOutput,
929
+ model_kwargs: Dict[str, Any],
930
+ is_encoder_decoder: bool = False,
931
+ ) -> Dict[str, Any]:
932
+ # update past_key_values
933
+ cache_name, cache = self._extract_past_from_model_output(outputs)
934
+ model_kwargs[cache_name] = cache
935
+
936
+ # update attention mask
937
+ if "attention_mask" in model_kwargs:
938
+ attention_mask = model_kwargs["attention_mask"]
939
+ model_kwargs["attention_mask"] = torch.cat(
940
+ [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1
941
+ )
942
+
943
+ # update position ids
944
+ if "position_ids" in model_kwargs:
945
+ position_ids = model_kwargs["position_ids"]
946
+ new_position_id = position_ids[..., -1:].clone()
947
+ new_position_id += 1
948
+ model_kwargs["position_ids"] = torch.cat(
949
+ [position_ids, new_position_id], dim=-1
950
+ )
951
+
952
+ model_kwargs["is_first_forward"] = False
953
+ return model_kwargs
954
+
955
+ def prepare_inputs_for_generation(
956
+ self,
957
+ input_ids: torch.LongTensor,
958
+ past_key_values: Optional[torch.Tensor] = None,
959
+ attention_mask: Optional[torch.Tensor] = None,
960
+ position_ids: Optional[torch.Tensor] = None,
961
+ use_cache: Optional[bool] = None,
962
+ is_first_forward: bool = True,
963
+ **kwargs
964
+ ) -> dict:
965
+ # only last token for input_ids if past is not None
966
+ if position_ids is None:
967
+ position_ids = self.get_position_ids(input_ids, device=input_ids.device)
968
+ if not is_first_forward:
969
+ if past_key_values is not None:
970
+ position_ids = position_ids[..., -1:]
971
+ input_ids = input_ids[:, -1:]
972
+ return {
973
+ "input_ids": input_ids,
974
+ "past_key_values": past_key_values,
975
+ "position_ids": position_ids,
976
+ "attention_mask": attention_mask,
977
+ "return_last_logit": True,
978
+ "use_cache": use_cache
979
+ }
980
+
981
+ def forward(
982
+ self,
983
+ input_ids: Optional[torch.Tensor] = None,
984
+ position_ids: Optional[torch.Tensor] = None,
985
+ attention_mask: Optional[torch.Tensor] = None,
986
+ past_key_values: Optional[Tuple[torch.FloatTensor]] = None,
987
+ inputs_embeds: Optional[torch.Tensor] = None,
988
+ labels: Optional[torch.Tensor] = None,
989
+ use_cache: Optional[bool] = None,
990
+ output_attentions: Optional[bool] = None,
991
+ output_hidden_states: Optional[bool] = None,
992
+ return_dict: Optional[bool] = None,
993
+ return_last_logit: Optional[bool] = False,
994
+ ):
995
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
996
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
997
+
998
+ transformer_outputs = self.transformer(
999
+ input_ids=input_ids,
1000
+ position_ids=position_ids,
1001
+ attention_mask=attention_mask,
1002
+ past_key_values=past_key_values,
1003
+ inputs_embeds=inputs_embeds,
1004
+ use_cache=use_cache,
1005
+ output_hidden_states=output_hidden_states,
1006
+ return_dict=return_dict,
1007
+ )
1008
+
1009
+ hidden_states = transformer_outputs[0]
1010
+ if return_last_logit:
1011
+ hidden_states = hidden_states[:, -1:]
1012
+ lm_logits = self.transformer.output_layer(hidden_states)
1013
+
1014
+ loss = None
1015
+ if labels is not None:
1016
+ lm_logits = lm_logits.to(torch.float32)
1017
+
1018
+ # Shift so that tokens < n predict n
1019
+ shift_logits = lm_logits[..., :-1, :].contiguous()
1020
+ shift_labels = labels[..., 1:].contiguous()
1021
+ # Flatten the tokens
1022
+ loss_fct = CrossEntropyLoss(ignore_index=-100)
1023
+ loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))
1024
+
1025
+ lm_logits = lm_logits.to(hidden_states.dtype)
1026
+ loss = loss.to(hidden_states.dtype)
1027
+
1028
+ if not return_dict:
1029
+ output = (lm_logits,) + transformer_outputs[1:]
1030
+ return ((loss,) + output) if loss is not None else output
1031
+
1032
+ return CausalLMOutputWithPast(
1033
+ loss=loss,
1034
+ logits=lm_logits,
1035
+ past_key_values=transformer_outputs.past_key_values,
1036
+ hidden_states=transformer_outputs.hidden_states,
1037
+ attentions=transformer_outputs.attentions,
1038
+ )
1039
+
1040
+ @staticmethod
1041
+ def _reorder_cache(
1042
+ past: Tuple[Tuple[torch.Tensor, torch.Tensor], ...], beam_idx: torch.LongTensor
1043
+ ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], ...]:
1044
+ """
1045
+ This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or
1046
+ [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct
1047
+ beam_idx at every generation step.
1048
+
1049
+ Output shares the same memory storage as `past`.
1050
+ """
1051
+ return tuple(
1052
+ (
1053
+ layer_past[0].index_select(0, beam_idx.to(layer_past[0].device)),
1054
+ layer_past[1].index_select(0, beam_idx.to(layer_past[1].device)),
1055
+ )
1056
+ for layer_past in past
1057
+ )
1058
+
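
`prepare_inputs_for_generation` feeds only the last token (plus the cached key/values) after the first forward pass, and `_update_model_kwargs_for_generation` extends `attention_mask` and `position_ids` by one step per iteration, so the standard `generate()` loop works unchanged. A hedged end-to-end sketch; the repo id and chat-template usage are illustrative and assume the matching remote tokenizer:

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "THUDM/glm-4-9b-chat"   # example repo id that ships this modeling file
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto"
).eval()

messages = [{"role": "user", "content": "Give a one-line summary of rotary embeddings."}]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt", return_dict=True
).to(model.device)

with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=64, do_sample=False)

# strip the prompt tokens before decoding
print(tokenizer.decode(output_ids[0, inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```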
1059
+ class ChatGLMForSequenceClassification(ChatGLMPreTrainedModel):
1060
+ def __init__(self, config: ChatGLMConfig, empty_init=True, device=None):
1061
+ super().__init__(config)
1062
+
1063
+ self.num_labels = config.num_labels
1064
+ self.transformer = ChatGLMModel(config, empty_init=empty_init, device=device)
1065
+
1066
+ self.classifier_head = nn.Linear(config.hidden_size, config.num_labels, bias=True, dtype=config.torch_dtype)
1067
+ if config.classifier_dropout is not None:
1068
+ self.dropout = nn.Dropout(config.classifier_dropout)
1069
+ else:
1070
+ self.dropout = None
1071
+ self.config = config
1072
+
1073
+ def forward(
1074
+ self,
1075
+ input_ids: Optional[torch.LongTensor] = None,
1076
+ position_ids: Optional[torch.LongTensor] = None,
1077
+ attention_mask: Optional[torch.Tensor] = None,
1078
+ full_attention_mask: Optional[torch.Tensor] = None,
1079
+ past_key_values: Optional[Tuple[Tuple[torch.Tensor, torch.Tensor], ...]] = None,
1080
+ inputs_embeds: Optional[torch.LongTensor] = None,
1081
+ labels: Optional[torch.LongTensor] = None,
1082
+ use_cache: Optional[bool] = None,
1083
+ output_attentions: Optional[bool] = None,
1084
+ output_hidden_states: Optional[bool] = None,
1085
+ return_dict: Optional[bool] = None,
1086
+ ) -> Union[Tuple[torch.Tensor, ...], SequenceClassifierOutputWithPast]:
1087
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1088
+
1089
+ transformer_outputs = self.transformer(
1090
+ input_ids=input_ids,
1091
+ position_ids=position_ids,
1092
+ attention_mask=attention_mask,
1093
+ full_attention_mask=full_attention_mask,
1094
+ past_key_values=past_key_values,
1095
+ inputs_embeds=inputs_embeds,
1096
+ use_cache=use_cache,
1097
+ output_attentions=output_attentions,
1098
+ output_hidden_states=output_hidden_states,
1099
+ return_dict=return_dict,
1100
+ )
1101
+
1102
+ hidden_states = transformer_outputs[0]
1103
+ pooled_hidden_states = hidden_states[:, -1]
1104
+ if self.dropout is not None:
1105
+ pooled_hidden_states = self.dropout(pooled_hidden_states)
1106
+ logits = self.classifier_head(pooled_hidden_states)
1107
+
1108
+ loss = None
1109
+ if labels is not None:
1110
+ if self.config.problem_type is None:
1111
+ if self.num_labels == 1:
1112
+ self.config.problem_type = "regression"
1113
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1114
+ self.config.problem_type = "single_label_classification"
1115
+ else:
1116
+ self.config.problem_type = "multi_label_classification"
1117
+
1118
+ if self.config.problem_type == "regression":
1119
+ loss_fct = MSELoss()
1120
+ if self.num_labels == 1:
1121
+ loss = loss_fct(logits.squeeze().float(), labels.squeeze())
1122
+ else:
1123
+ loss = loss_fct(logits.float(), labels)
1124
+ elif self.config.problem_type == "single_label_classification":
1125
+ loss_fct = CrossEntropyLoss()
1126
+ loss = loss_fct(logits.view(-1, self.num_labels).float(), labels.view(-1))
1127
+ elif self.config.problem_type == "multi_label_classification":
1128
+ loss_fct = BCEWithLogitsLoss()
1129
+ loss = loss_fct(logits.float(), labels.view(-1, self.num_labels))
1130
+
1131
+ if not return_dict:
1132
+ output = (logits,) + transformer_outputs[1:]
1133
+ return ((loss,) + output) if loss is not None else output
1134
+
1135
+ return SequenceClassifierOutputWithPast(
1136
+ loss=loss,
1137
+ logits=logits,
1138
+ past_key_values=transformer_outputs.past_key_values,
1139
+ hidden_states=transformer_outputs.hidden_states,
1140
+ attentions=transformer_outputs.attentions,
1141
+ )