raufrajar committed on
Commit dfc8665 · verified · 1 Parent(s): f60ae84

Add files using upload-large-folder tool
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,45 @@
+ ---
+ license: mit
+ license_link: https://huggingface.co/microsoft/Phi-4-mini-instruct/resolve/main/LICENSE
+ language:
+ - multilingual
+ pipeline_tag: text-generation
+ tags:
+ - nlp
+ - code
+ - mlx
+ widget:
+ - messages:
+   - role: user
+     content: Can you provide ways to eat combinations of bananas and dragonfruits?
+ library_name: transformers
+ base_model: microsoft/Phi-4-mini-instruct
+ ---
+
+ # raufrajar/Phi-4-mini-instruct-8bit
+
+ The Model [raufrajar/Phi-4-mini-instruct-8bit](https://huggingface.co/raufrajar/Phi-4-mini-instruct-8bit) was
+ converted to MLX format from [microsoft/Phi-4-mini-instruct](https://huggingface.co/microsoft/Phi-4-mini-instruct)
+ using mlx-lm version **0.21.5**.
+
+ ## Use with mlx
+
+ ```bash
+ pip install mlx-lm
+ ```
+
+ ```python
+ from mlx_lm import load, generate
+
+ model, tokenizer = load("raufrajar/Phi-4-mini-instruct-8bit")
+
+ prompt = "hello"
+
+ if tokenizer.chat_template is not None:
+     messages = [{"role": "user", "content": prompt}]
+     prompt = tokenizer.apply_chat_template(
+         messages, add_generation_prompt=True
+     )
+
+ response = generate(model, tokenizer, prompt=prompt, verbose=True)
+ ```
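For completeness, here is a slightly fuller generation example. It is a minimal sketch, not part of the uploaded card: it assumes the `max_tokens` keyword accepted by recent `mlx_lm.generate` releases and reuses the repository id from the README above.

```python
from mlx_lm import load, generate

# Load the 8-bit quantized weights from this repository.
model, tokenizer = load("raufrajar/Phi-4-mini-instruct-8bit")

# Format the request with the model's chat template, as in the card's example.
messages = [{"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)

# max_tokens bounds the completion length; verbose=True prints generation statistics.
response = generate(model, tokenizer, prompt=prompt, max_tokens=256, verbose=True)
```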
added_tokens.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "<|/tool_call|>": 200026,
+   "<|/tool|>": 200024,
+   "<|assistant|>": 200019,
+   "<|end|>": 200020,
+   "<|system|>": 200022,
+   "<|tag|>": 200028,
+   "<|tool_call|>": 200025,
+   "<|tool_response|>": 200027,
+   "<|tool|>": 200023,
+   "<|user|>": 200021
+ }
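The entries above are the chat and tool-calling markers that Phi-4-mini adds on top of the base GPT-4o-style vocabulary. The sketch below is only an illustration (it assumes the mlx-lm tokenizer wrapper forwards `tokenize=False` to the underlying Hugging Face tokenizer) of how the chat template stitches these tokens around each turn.

```python
from mlx_lm import load

model, tokenizer = load("raufrajar/Phi-4-mini-instruct-8bit")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "hello"},
]

# Render the prompt as text instead of token ids to inspect the special tokens,
# which appear roughly as "<|system|>...<|end|><|user|>...<|end|><|assistant|>".
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
```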
config.json ADDED
@@ -0,0 +1,151 @@
+ {
+   "architectures": ["Phi3ForCausalLM"],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_phi3.Phi3Config",
+     "AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM",
+     "AutoTokenizer": "Xenova/gpt-4o"
+   },
+   "bos_token_id": 199999,
+   "embd_pdrop": 0.0,
+   "eos_token_id": 199999,
+   "full_attn_mod": 1,
+   "hidden_act": "silu",
+   "hidden_size": 3072,
+   "initializer_range": 0.02,
+   "intermediate_size": 8192,
+   "interpolate_factor": 1,
+   "lm_head_bias": false,
+   "max_position_embeddings": 131072,
+   "mlp_bias": false,
+   "model_type": "phi3",
+   "num_attention_heads": 24,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "original_max_position_embeddings": 4096,
+   "pad_token_id": 199999,
+   "partial_rotary_factor": 0.75,
+   "quantization": {"group_size": 64, "bits": 8},
+   "quantization_config": {"group_size": 64, "bits": 8},
+   "resid_pdrop": 0.0,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "long_factor": [1, 1.118320672, 1.250641126, 1.398617824, 1.564103225, 1.74916897, 1.956131817, 2.187582649, 2.446418898, 2.735880826, 3.059592084, 3.421605075, 3.826451687, 4.279200023, 4.785517845, 5.351743533, 5.984965424, 6.693110555, 7.485043894, 8.370679318, 9.36110372, 10.4687158, 11.70738129, 13.09260651, 14.64173252, 16.37415215, 18.31155283, 20.47818807, 22.90118105, 25.61086418, 28.64115884, 32.03, 32.1, 32.13, 32.23, 32.6, 32.61, 32.64, 32.66, 32.7, 32.71, 32.93, 32.97, 33.28, 33.49, 33.5, 44.16, 47.77],
+     "short_factor": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0],
+     "type": "longrope"
+   },
+   "rope_theta": 10000.0,
+   "sliding_window": 262144,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.45.0",
+   "use_cache": true,
+   "vocab_size": 200064
+ }
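The `quantization` block (8 bits, group size 64) is what distinguishes this upload from the bfloat16 original. As a rough illustration of how such a checkpoint is typically produced with mlx-lm (a sketch assuming the keyword names used by recent mlx-lm releases, not a record of the exact command used for this repository):

```python
from mlx_lm import convert

# Quantize the original weights to 8 bits with a group size of 64,
# matching the "quantization" block in config.json above.
convert(
    hf_path="microsoft/Phi-4-mini-instruct",
    mlx_path="Phi-4-mini-instruct-8bit",
    quantize=True,
    q_bits=8,
    q_group_size=64,
)
```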
configuration_phi3.py ADDED
@@ -0,0 +1,226 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """Phi-3 model configuration"""
17
+
18
+ from transformers.configuration_utils import PretrainedConfig
19
+ from transformers.utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ class Phi3Config(PretrainedConfig):
26
+ r"""
27
+ This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
28
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
29
+ defaults will yield a similar configuration to that of the
30
+ [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).
31
+
32
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
33
+ documentation from [`PretrainedConfig`] for more information.
34
+
35
+ Args:
36
+ vocab_size (`int`, *optional*, defaults to 32064):
37
+ Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
38
+ `inputs_ids` passed when calling [`Phi3Model`].
39
+ hidden_size (`int`, *optional*, defaults to 3072):
40
+ Dimension of the hidden representations.
41
+ intermediate_size (`int`, *optional*, defaults to 8192):
42
+ Dimension of the MLP representations.
43
+ num_hidden_layers (`int`, *optional*, defaults to 32):
44
+ Number of hidden layers in the Transformer decoder.
45
+ num_attention_heads (`int`, *optional*, defaults to 32):
46
+ Number of attention heads for each attention layer in the Transformer decoder.
47
+ num_key_value_heads (`int`, *optional*):
48
+ This is the number of key_value heads that should be used to implement Grouped Query Attention. If
49
+ `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
50
+ `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
51
+ converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
52
+ by meanpooling all the original heads within that group. For more details checkout [this
53
+ paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
54
+ `num_attention_heads`.
55
+ resid_pdrop (`float`, *optional*, defaults to 0.0):
56
+ Dropout probability for mlp outputs.
57
+ embd_pdrop (`int`, *optional*, defaults to 0.0):
58
+ The dropout ratio for the embeddings.
59
+ attention_dropout (`float`, *optional*, defaults to 0.0):
60
+ The dropout ratio after computing the attention scores.
61
+ hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
62
+ The non-linear activation function (function or string) in the decoder.
63
+ max_position_embeddings (`int`, *optional*, defaults to 4096):
64
+ The maximum sequence length that this model might ever be used with.
65
+ original_max_position_embeddings (`int`, *optional*, defaults to 4096):
66
+ The maximum sequence length that this model was trained with. This is used to determine the size of the
67
+ original RoPE embeddings when using long scaling.
68
+ initializer_range (`float`, *optional*, defaults to 0.02):
69
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
70
+ rms_norm_eps (`float`, *optional*, defaults to 1e-05):
71
+ The epsilon value used for the RMSNorm.
72
+ use_cache (`bool`, *optional*, defaults to `True`):
73
+ Whether or not the model should return the last key/values attentions (not used by all models). Only
74
+ relevant if `config.is_decoder=True`. Whether to tie weight embeddings or not.
75
+ tie_word_embeddings (`bool`, *optional*, defaults to `False`):
76
+ Whether to tie weight embeddings
77
+ rope_theta (`float`, *optional*, defaults to 10000.0):
78
+ The base period of the RoPE embeddings.
79
+ rope_scaling (`dict`, *optional*):
80
+ The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
81
+ contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be `longrope` and
82
+ the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
83
+ divided by the number of attention heads divided by 2.
84
+ partial_rotary_factor (`float`, *optional*, defaults to 1.0):
85
+ Percentage of the query and keys which will have rotary embedding. Must be between 0.0 and 1.0.
86
+ bos_token_id (`int`, *optional*, defaults to 1):
87
+ The id of the "beginning-of-sequence" token.
88
+ eos_token_id (`int`, *optional*, defaults to 32000):
89
+ The id of the "end-of-sequence" token.
90
+ pad_token_id (`int`, *optional*, defaults to 32000):
91
+ The id of the padding token.
92
+ sliding_window (`int`, *optional*):
93
+ Sliding window attention window size. If `None`, no sliding window is applied.
94
+
95
+ Example:
96
+
97
+ ```python
98
+ >>> from transformers import Phi3Model, Phi3Config
99
+
100
+ >>> # Initializing a Phi-3 style configuration
101
+ >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
102
+
103
+ >>> # Initializing a model from the configuration
104
+ >>> model = Phi3Model(configuration)
105
+
106
+ >>> # Accessing the model configuration
107
+ >>> configuration = model.config
108
+ ```"""
109
+
110
+ model_type = "phi3"
111
+ keys_to_ignore_at_inference = ["past_key_values"]
112
+
113
+ def __init__(
114
+ self,
115
+ vocab_size=32064,
116
+ hidden_size=3072,
117
+ intermediate_size=8192,
118
+ num_hidden_layers=32,
119
+ num_attention_heads=32,
120
+ num_key_value_heads=None,
121
+ resid_pdrop=0.0,
122
+ embd_pdrop=0.0,
123
+ attention_dropout=0.0,
124
+ hidden_act="silu",
125
+ max_position_embeddings=4096,
126
+ original_max_position_embeddings=4096,
127
+ initializer_range=0.02,
128
+ rms_norm_eps=1e-5,
129
+ use_cache=True,
130
+ tie_word_embeddings=False,
131
+ rope_theta=10000.0,
132
+ rope_scaling=None,
133
+ partial_rotary_factor=1.0,
134
+ bos_token_id=1,
135
+ eos_token_id=32000,
136
+ pad_token_id=32000,
137
+ sliding_window=None,
138
+ **kwargs,
139
+ ):
140
+ self.vocab_size = vocab_size
141
+ self.hidden_size = hidden_size
142
+ self.intermediate_size = intermediate_size
143
+ self.num_hidden_layers = num_hidden_layers
144
+ self.num_attention_heads = num_attention_heads
145
+
146
+ if num_key_value_heads is None:
147
+ num_key_value_heads = num_attention_heads
148
+
149
+ self.num_key_value_heads = num_key_value_heads
150
+ self.resid_pdrop = resid_pdrop
151
+ self.embd_pdrop = embd_pdrop
152
+ self.attention_dropout = attention_dropout
153
+ self.hidden_act = hidden_act
154
+ self.max_position_embeddings = max_position_embeddings
155
+ self.original_max_position_embeddings = original_max_position_embeddings
156
+ self.initializer_range = initializer_range
157
+ self.rms_norm_eps = rms_norm_eps
158
+ self.use_cache = use_cache
159
+ self.rope_theta = rope_theta
160
+ self.rope_scaling = rope_scaling
161
+ self.partial_rotary_factor = partial_rotary_factor
162
+ self._rope_scaling_adjustment()
163
+ self._rope_scaling_validation()
164
+ self.sliding_window = sliding_window
165
+
166
+ super().__init__(
167
+ bos_token_id=bos_token_id,
168
+ eos_token_id=eos_token_id,
169
+ pad_token_id=pad_token_id,
170
+ tie_word_embeddings=tie_word_embeddings,
171
+ **kwargs,
172
+ )
173
+
174
+ def _rope_scaling_adjustment(self):
175
+ """
176
+ Adjust the `type` of the `rope_scaling` configuration for backward compatibility.
177
+ """
178
+ if self.rope_scaling is None:
179
+ return
180
+
181
+ rope_scaling_type = self.rope_scaling.get("type", None)
182
+
183
+ # For backward compatibility if previous version used "su" or "yarn"
184
+ if rope_scaling_type is not None and rope_scaling_type in ["su", "yarn"]:
185
+ self.rope_scaling["type"] = "longrope"
186
+
187
+ def _rope_scaling_validation(self):
188
+ """
189
+ Validate the `rope_scaling` configuration.
190
+ """
191
+ if self.rope_scaling is None:
192
+ return
193
+
194
+ if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
195
+ raise ValueError(
196
+ "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
197
+ f"got {self.rope_scaling}"
198
+ )
199
+ rope_scaling_type = self.rope_scaling.get("type", None)
200
+ rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
201
+ rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
202
+ if rope_scaling_type is None or rope_scaling_type not in ["longrope"]:
203
+ raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}")
204
+ if not (
205
+ isinstance(rope_scaling_short_factor, list)
206
+ and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
207
+ ):
208
+ raise ValueError(
209
+ f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
210
+ )
211
+ rotary_ndims = int(self.hidden_size // self.num_attention_heads * self.partial_rotary_factor)
212
+ if not len(rope_scaling_short_factor) == rotary_ndims // 2:
213
+ raise ValueError(
214
+ f"`rope_scaling`'s short_factor field must have length {rotary_ndims // 2}, got {len(rope_scaling_short_factor)}"
215
+ )
216
+ if not (
217
+ isinstance(rope_scaling_long_factor, list)
218
+ and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
219
+ ):
220
+ raise ValueError(
221
+ f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
222
+ )
223
+ if not len(rope_scaling_long_factor) == rotary_ndims // 2:
224
+ raise ValueError(
225
+ f"`rope_scaling`'s long_factor field must have length {rotary_ndims // 2}, got {len(rope_scaling_long_factor)}"
226
+ )
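For the checkpoint above, `_rope_scaling_validation` requires the `short_factor` and `long_factor` lists to have length `rotary_ndims // 2`. A quick check with the values from config.json (hidden_size 3072, 24 attention heads, partial_rotary_factor 0.75):

```python
# Values taken from config.json above.
hidden_size = 3072
num_attention_heads = 24
partial_rotary_factor = 0.75

rotary_ndims = int(hidden_size // num_attention_heads * partial_rotary_factor)  # 96
expected_len = rotary_ndims // 2                                                # 48

# Both rope_scaling factor lists in config.json have 48 entries, so validation passes.
print(rotary_ndims, expected_len)
```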
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7413f823f84d9776d066003abb3a6af912f7ec3c057bfd1fab8b631a676248a
+ size 1054132152
model-00002-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4fb7417964cf42ae53dd42eb2a3171c7824db7e22967dcd18d189ce60d36acda
+ size 1069686227
model-00003-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:448137846337dba3fc853d584e75d9d17c1551948508f538a7cfea9c6325da41
+ size 1069686332
model-00004-of-00004.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a747f199dfc5357da44fcd74fb18bdbe8b737f6b5c5b39b41b4185fb3dd79728
+ size 882506800
model.safetensors.index.json ADDED
@@ -0,0 +1,459 @@
1
+ {
2
+ "metadata": {
3
+ "total_size": 4075960320
4
+ },
5
+ "weight_map": {
6
+ "model.embed_tokens.biases": "model-00001-of-00004.safetensors",
7
+ "model.embed_tokens.scales": "model-00001-of-00004.safetensors",
8
+ "model.embed_tokens.weight": "model-00001-of-00004.safetensors",
9
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00004.safetensors",
10
+ "model.layers.0.mlp.down_proj.biases": "model-00001-of-00004.safetensors",
11
+ "model.layers.0.mlp.down_proj.scales": "model-00001-of-00004.safetensors",
12
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
13
+ "model.layers.0.mlp.gate_up_proj.biases": "model-00001-of-00004.safetensors",
14
+ "model.layers.0.mlp.gate_up_proj.scales": "model-00001-of-00004.safetensors",
15
+ "model.layers.0.mlp.gate_up_proj.weight": "model-00001-of-00004.safetensors",
16
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
17
+ "model.layers.0.self_attn.o_proj.biases": "model-00001-of-00004.safetensors",
18
+ "model.layers.0.self_attn.o_proj.scales": "model-00001-of-00004.safetensors",
19
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
20
+ "model.layers.0.self_attn.qkv_proj.biases": "model-00001-of-00004.safetensors",
21
+ "model.layers.0.self_attn.qkv_proj.scales": "model-00001-of-00004.safetensors",
22
+ "model.layers.0.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
23
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00004.safetensors",
24
+ "model.layers.1.mlp.down_proj.biases": "model-00001-of-00004.safetensors",
25
+ "model.layers.1.mlp.down_proj.scales": "model-00001-of-00004.safetensors",
26
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
27
+ "model.layers.1.mlp.gate_up_proj.biases": "model-00001-of-00004.safetensors",
28
+ "model.layers.1.mlp.gate_up_proj.scales": "model-00001-of-00004.safetensors",
29
+ "model.layers.1.mlp.gate_up_proj.weight": "model-00001-of-00004.safetensors",
30
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
31
+ "model.layers.1.self_attn.o_proj.biases": "model-00001-of-00004.safetensors",
32
+ "model.layers.1.self_attn.o_proj.scales": "model-00001-of-00004.safetensors",
33
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
34
+ "model.layers.1.self_attn.qkv_proj.biases": "model-00001-of-00004.safetensors",
35
+ "model.layers.1.self_attn.qkv_proj.scales": "model-00001-of-00004.safetensors",
36
+ "model.layers.1.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
37
+ "model.layers.10.input_layernorm.weight": "model-00002-of-00004.safetensors",
38
+ "model.layers.10.mlp.down_proj.biases": "model-00002-of-00004.safetensors",
39
+ "model.layers.10.mlp.down_proj.scales": "model-00002-of-00004.safetensors",
40
+ "model.layers.10.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
41
+ "model.layers.10.mlp.gate_up_proj.biases": "model-00002-of-00004.safetensors",
42
+ "model.layers.10.mlp.gate_up_proj.scales": "model-00002-of-00004.safetensors",
43
+ "model.layers.10.mlp.gate_up_proj.weight": "model-00002-of-00004.safetensors",
44
+ "model.layers.10.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
45
+ "model.layers.10.self_attn.o_proj.biases": "model-00002-of-00004.safetensors",
46
+ "model.layers.10.self_attn.o_proj.scales": "model-00002-of-00004.safetensors",
47
+ "model.layers.10.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
48
+ "model.layers.10.self_attn.qkv_proj.biases": "model-00002-of-00004.safetensors",
49
+ "model.layers.10.self_attn.qkv_proj.scales": "model-00002-of-00004.safetensors",
50
+ "model.layers.10.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
51
+ "model.layers.11.input_layernorm.weight": "model-00002-of-00004.safetensors",
52
+ "model.layers.11.mlp.down_proj.biases": "model-00002-of-00004.safetensors",
53
+ "model.layers.11.mlp.down_proj.scales": "model-00002-of-00004.safetensors",
54
+ "model.layers.11.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
55
+ "model.layers.11.mlp.gate_up_proj.biases": "model-00002-of-00004.safetensors",
56
+ "model.layers.11.mlp.gate_up_proj.scales": "model-00002-of-00004.safetensors",
57
+ "model.layers.11.mlp.gate_up_proj.weight": "model-00002-of-00004.safetensors",
58
+ "model.layers.11.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
59
+ "model.layers.11.self_attn.o_proj.biases": "model-00002-of-00004.safetensors",
60
+ "model.layers.11.self_attn.o_proj.scales": "model-00002-of-00004.safetensors",
61
+ "model.layers.11.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
62
+ "model.layers.11.self_attn.qkv_proj.biases": "model-00002-of-00004.safetensors",
63
+ "model.layers.11.self_attn.qkv_proj.scales": "model-00002-of-00004.safetensors",
64
+ "model.layers.11.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
65
+ "model.layers.12.input_layernorm.weight": "model-00002-of-00004.safetensors",
66
+ "model.layers.12.mlp.down_proj.biases": "model-00002-of-00004.safetensors",
67
+ "model.layers.12.mlp.down_proj.scales": "model-00002-of-00004.safetensors",
68
+ "model.layers.12.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
69
+ "model.layers.12.mlp.gate_up_proj.biases": "model-00002-of-00004.safetensors",
70
+ "model.layers.12.mlp.gate_up_proj.scales": "model-00002-of-00004.safetensors",
71
+ "model.layers.12.mlp.gate_up_proj.weight": "model-00002-of-00004.safetensors",
72
+ "model.layers.12.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
73
+ "model.layers.12.self_attn.o_proj.biases": "model-00002-of-00004.safetensors",
74
+ "model.layers.12.self_attn.o_proj.scales": "model-00002-of-00004.safetensors",
75
+ "model.layers.12.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
76
+ "model.layers.12.self_attn.qkv_proj.biases": "model-00002-of-00004.safetensors",
77
+ "model.layers.12.self_attn.qkv_proj.scales": "model-00002-of-00004.safetensors",
78
+ "model.layers.12.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
79
+ "model.layers.13.input_layernorm.weight": "model-00003-of-00004.safetensors",
80
+ "model.layers.13.mlp.down_proj.biases": "model-00003-of-00004.safetensors",
81
+ "model.layers.13.mlp.down_proj.scales": "model-00003-of-00004.safetensors",
82
+ "model.layers.13.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
83
+ "model.layers.13.mlp.gate_up_proj.biases": "model-00002-of-00004.safetensors",
84
+ "model.layers.13.mlp.gate_up_proj.scales": "model-00002-of-00004.safetensors",
85
+ "model.layers.13.mlp.gate_up_proj.weight": "model-00002-of-00004.safetensors",
86
+ "model.layers.13.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
87
+ "model.layers.13.self_attn.o_proj.biases": "model-00002-of-00004.safetensors",
88
+ "model.layers.13.self_attn.o_proj.scales": "model-00002-of-00004.safetensors",
89
+ "model.layers.13.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
90
+ "model.layers.13.self_attn.qkv_proj.biases": "model-00002-of-00004.safetensors",
91
+ "model.layers.13.self_attn.qkv_proj.scales": "model-00002-of-00004.safetensors",
92
+ "model.layers.13.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
93
+ "model.layers.14.input_layernorm.weight": "model-00003-of-00004.safetensors",
94
+ "model.layers.14.mlp.down_proj.biases": "model-00003-of-00004.safetensors",
95
+ "model.layers.14.mlp.down_proj.scales": "model-00003-of-00004.safetensors",
96
+ "model.layers.14.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
97
+ "model.layers.14.mlp.gate_up_proj.biases": "model-00003-of-00004.safetensors",
98
+ "model.layers.14.mlp.gate_up_proj.scales": "model-00003-of-00004.safetensors",
99
+ "model.layers.14.mlp.gate_up_proj.weight": "model-00003-of-00004.safetensors",
100
+ "model.layers.14.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
101
+ "model.layers.14.self_attn.o_proj.biases": "model-00003-of-00004.safetensors",
102
+ "model.layers.14.self_attn.o_proj.scales": "model-00003-of-00004.safetensors",
103
+ "model.layers.14.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
104
+ "model.layers.14.self_attn.qkv_proj.biases": "model-00003-of-00004.safetensors",
105
+ "model.layers.14.self_attn.qkv_proj.scales": "model-00003-of-00004.safetensors",
106
+ "model.layers.14.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
107
+ "model.layers.15.input_layernorm.weight": "model-00003-of-00004.safetensors",
108
+ "model.layers.15.mlp.down_proj.biases": "model-00003-of-00004.safetensors",
109
+ "model.layers.15.mlp.down_proj.scales": "model-00003-of-00004.safetensors",
110
+ "model.layers.15.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
111
+ "model.layers.15.mlp.gate_up_proj.biases": "model-00003-of-00004.safetensors",
112
+ "model.layers.15.mlp.gate_up_proj.scales": "model-00003-of-00004.safetensors",
113
+ "model.layers.15.mlp.gate_up_proj.weight": "model-00003-of-00004.safetensors",
114
+ "model.layers.15.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
115
+ "model.layers.15.self_attn.o_proj.biases": "model-00003-of-00004.safetensors",
116
+ "model.layers.15.self_attn.o_proj.scales": "model-00003-of-00004.safetensors",
117
+ "model.layers.15.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
118
+ "model.layers.15.self_attn.qkv_proj.biases": "model-00003-of-00004.safetensors",
119
+ "model.layers.15.self_attn.qkv_proj.scales": "model-00003-of-00004.safetensors",
120
+ "model.layers.15.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
121
+ "model.layers.16.input_layernorm.weight": "model-00003-of-00004.safetensors",
122
+ "model.layers.16.mlp.down_proj.biases": "model-00003-of-00004.safetensors",
123
+ "model.layers.16.mlp.down_proj.scales": "model-00003-of-00004.safetensors",
124
+ "model.layers.16.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
125
+ "model.layers.16.mlp.gate_up_proj.biases": "model-00003-of-00004.safetensors",
126
+ "model.layers.16.mlp.gate_up_proj.scales": "model-00003-of-00004.safetensors",
127
+ "model.layers.16.mlp.gate_up_proj.weight": "model-00003-of-00004.safetensors",
128
+ "model.layers.16.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
129
+ "model.layers.16.self_attn.o_proj.biases": "model-00003-of-00004.safetensors",
130
+ "model.layers.16.self_attn.o_proj.scales": "model-00003-of-00004.safetensors",
131
+ "model.layers.16.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
132
+ "model.layers.16.self_attn.qkv_proj.biases": "model-00003-of-00004.safetensors",
133
+ "model.layers.16.self_attn.qkv_proj.scales": "model-00003-of-00004.safetensors",
134
+ "model.layers.16.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
135
+ "model.layers.17.input_layernorm.weight": "model-00003-of-00004.safetensors",
136
+ "model.layers.17.mlp.down_proj.biases": "model-00003-of-00004.safetensors",
137
+ "model.layers.17.mlp.down_proj.scales": "model-00003-of-00004.safetensors",
138
+ "model.layers.17.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
139
+ "model.layers.17.mlp.gate_up_proj.biases": "model-00003-of-00004.safetensors",
140
+ "model.layers.17.mlp.gate_up_proj.scales": "model-00003-of-00004.safetensors",
141
+ "model.layers.17.mlp.gate_up_proj.weight": "model-00003-of-00004.safetensors",
142
+ "model.layers.17.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
143
+ "model.layers.17.self_attn.o_proj.biases": "model-00003-of-00004.safetensors",
144
+ "model.layers.17.self_attn.o_proj.scales": "model-00003-of-00004.safetensors",
145
+ "model.layers.17.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
146
+ "model.layers.17.self_attn.qkv_proj.biases": "model-00003-of-00004.safetensors",
147
+ "model.layers.17.self_attn.qkv_proj.scales": "model-00003-of-00004.safetensors",
148
+ "model.layers.17.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
149
+ "model.layers.18.input_layernorm.weight": "model-00003-of-00004.safetensors",
150
+ "model.layers.18.mlp.down_proj.biases": "model-00003-of-00004.safetensors",
151
+ "model.layers.18.mlp.down_proj.scales": "model-00003-of-00004.safetensors",
152
+ "model.layers.18.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
153
+ "model.layers.18.mlp.gate_up_proj.biases": "model-00003-of-00004.safetensors",
154
+ "model.layers.18.mlp.gate_up_proj.scales": "model-00003-of-00004.safetensors",
155
+ "model.layers.18.mlp.gate_up_proj.weight": "model-00003-of-00004.safetensors",
156
+ "model.layers.18.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
157
+ "model.layers.18.self_attn.o_proj.biases": "model-00003-of-00004.safetensors",
158
+ "model.layers.18.self_attn.o_proj.scales": "model-00003-of-00004.safetensors",
159
+ "model.layers.18.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
160
+ "model.layers.18.self_attn.qkv_proj.biases": "model-00003-of-00004.safetensors",
161
+ "model.layers.18.self_attn.qkv_proj.scales": "model-00003-of-00004.safetensors",
162
+ "model.layers.18.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
163
+ "model.layers.19.input_layernorm.weight": "model-00003-of-00004.safetensors",
164
+ "model.layers.19.mlp.down_proj.biases": "model-00003-of-00004.safetensors",
165
+ "model.layers.19.mlp.down_proj.scales": "model-00003-of-00004.safetensors",
166
+ "model.layers.19.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
167
+ "model.layers.19.mlp.gate_up_proj.biases": "model-00003-of-00004.safetensors",
168
+ "model.layers.19.mlp.gate_up_proj.scales": "model-00003-of-00004.safetensors",
169
+ "model.layers.19.mlp.gate_up_proj.weight": "model-00003-of-00004.safetensors",
170
+ "model.layers.19.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
171
+ "model.layers.19.self_attn.o_proj.biases": "model-00003-of-00004.safetensors",
172
+ "model.layers.19.self_attn.o_proj.scales": "model-00003-of-00004.safetensors",
173
+ "model.layers.19.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
174
+ "model.layers.19.self_attn.qkv_proj.biases": "model-00003-of-00004.safetensors",
175
+ "model.layers.19.self_attn.qkv_proj.scales": "model-00003-of-00004.safetensors",
176
+ "model.layers.19.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
177
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00004.safetensors",
178
+ "model.layers.2.mlp.down_proj.biases": "model-00001-of-00004.safetensors",
179
+ "model.layers.2.mlp.down_proj.scales": "model-00001-of-00004.safetensors",
180
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00004.safetensors",
181
+ "model.layers.2.mlp.gate_up_proj.biases": "model-00001-of-00004.safetensors",
182
+ "model.layers.2.mlp.gate_up_proj.scales": "model-00001-of-00004.safetensors",
183
+ "model.layers.2.mlp.gate_up_proj.weight": "model-00001-of-00004.safetensors",
184
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00004.safetensors",
185
+ "model.layers.2.self_attn.o_proj.biases": "model-00001-of-00004.safetensors",
186
+ "model.layers.2.self_attn.o_proj.scales": "model-00001-of-00004.safetensors",
187
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
188
+ "model.layers.2.self_attn.qkv_proj.biases": "model-00001-of-00004.safetensors",
189
+ "model.layers.2.self_attn.qkv_proj.scales": "model-00001-of-00004.safetensors",
190
+ "model.layers.2.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
191
+ "model.layers.20.input_layernorm.weight": "model-00003-of-00004.safetensors",
192
+ "model.layers.20.mlp.down_proj.biases": "model-00003-of-00004.safetensors",
193
+ "model.layers.20.mlp.down_proj.scales": "model-00003-of-00004.safetensors",
194
+ "model.layers.20.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
195
+ "model.layers.20.mlp.gate_up_proj.biases": "model-00003-of-00004.safetensors",
196
+ "model.layers.20.mlp.gate_up_proj.scales": "model-00003-of-00004.safetensors",
197
+ "model.layers.20.mlp.gate_up_proj.weight": "model-00003-of-00004.safetensors",
198
+ "model.layers.20.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
199
+ "model.layers.20.self_attn.o_proj.biases": "model-00003-of-00004.safetensors",
200
+ "model.layers.20.self_attn.o_proj.scales": "model-00003-of-00004.safetensors",
201
+ "model.layers.20.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
202
+ "model.layers.20.self_attn.qkv_proj.biases": "model-00003-of-00004.safetensors",
203
+ "model.layers.20.self_attn.qkv_proj.scales": "model-00003-of-00004.safetensors",
204
+ "model.layers.20.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
205
+ "model.layers.21.input_layernorm.weight": "model-00003-of-00004.safetensors",
206
+ "model.layers.21.mlp.down_proj.biases": "model-00003-of-00004.safetensors",
207
+ "model.layers.21.mlp.down_proj.scales": "model-00003-of-00004.safetensors",
208
+ "model.layers.21.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
209
+ "model.layers.21.mlp.gate_up_proj.biases": "model-00003-of-00004.safetensors",
210
+ "model.layers.21.mlp.gate_up_proj.scales": "model-00003-of-00004.safetensors",
211
+ "model.layers.21.mlp.gate_up_proj.weight": "model-00003-of-00004.safetensors",
212
+ "model.layers.21.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
213
+ "model.layers.21.self_attn.o_proj.biases": "model-00003-of-00004.safetensors",
214
+ "model.layers.21.self_attn.o_proj.scales": "model-00003-of-00004.safetensors",
215
+ "model.layers.21.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
216
+ "model.layers.21.self_attn.qkv_proj.biases": "model-00003-of-00004.safetensors",
217
+ "model.layers.21.self_attn.qkv_proj.scales": "model-00003-of-00004.safetensors",
218
+ "model.layers.21.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
219
+ "model.layers.22.input_layernorm.weight": "model-00003-of-00004.safetensors",
220
+ "model.layers.22.mlp.down_proj.biases": "model-00003-of-00004.safetensors",
221
+ "model.layers.22.mlp.down_proj.scales": "model-00003-of-00004.safetensors",
222
+ "model.layers.22.mlp.down_proj.weight": "model-00003-of-00004.safetensors",
223
+ "model.layers.22.mlp.gate_up_proj.biases": "model-00003-of-00004.safetensors",
224
+ "model.layers.22.mlp.gate_up_proj.scales": "model-00003-of-00004.safetensors",
225
+ "model.layers.22.mlp.gate_up_proj.weight": "model-00003-of-00004.safetensors",
226
+ "model.layers.22.post_attention_layernorm.weight": "model-00003-of-00004.safetensors",
227
+ "model.layers.22.self_attn.o_proj.biases": "model-00003-of-00004.safetensors",
228
+ "model.layers.22.self_attn.o_proj.scales": "model-00003-of-00004.safetensors",
229
+ "model.layers.22.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
230
+ "model.layers.22.self_attn.qkv_proj.biases": "model-00003-of-00004.safetensors",
231
+ "model.layers.22.self_attn.qkv_proj.scales": "model-00003-of-00004.safetensors",
232
+ "model.layers.22.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
233
+ "model.layers.23.input_layernorm.weight": "model-00004-of-00004.safetensors",
234
+ "model.layers.23.mlp.down_proj.biases": "model-00004-of-00004.safetensors",
235
+ "model.layers.23.mlp.down_proj.scales": "model-00004-of-00004.safetensors",
236
+ "model.layers.23.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
237
+ "model.layers.23.mlp.gate_up_proj.biases": "model-00003-of-00004.safetensors",
238
+ "model.layers.23.mlp.gate_up_proj.scales": "model-00003-of-00004.safetensors",
239
+ "model.layers.23.mlp.gate_up_proj.weight": "model-00003-of-00004.safetensors",
240
+ "model.layers.23.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
241
+ "model.layers.23.self_attn.o_proj.biases": "model-00003-of-00004.safetensors",
242
+ "model.layers.23.self_attn.o_proj.scales": "model-00003-of-00004.safetensors",
243
+ "model.layers.23.self_attn.o_proj.weight": "model-00003-of-00004.safetensors",
244
+ "model.layers.23.self_attn.qkv_proj.biases": "model-00003-of-00004.safetensors",
245
+ "model.layers.23.self_attn.qkv_proj.scales": "model-00003-of-00004.safetensors",
246
+ "model.layers.23.self_attn.qkv_proj.weight": "model-00003-of-00004.safetensors",
247
+ "model.layers.24.input_layernorm.weight": "model-00004-of-00004.safetensors",
248
+ "model.layers.24.mlp.down_proj.biases": "model-00004-of-00004.safetensors",
249
+ "model.layers.24.mlp.down_proj.scales": "model-00004-of-00004.safetensors",
250
+ "model.layers.24.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
251
+ "model.layers.24.mlp.gate_up_proj.biases": "model-00004-of-00004.safetensors",
252
+ "model.layers.24.mlp.gate_up_proj.scales": "model-00004-of-00004.safetensors",
253
+ "model.layers.24.mlp.gate_up_proj.weight": "model-00004-of-00004.safetensors",
254
+ "model.layers.24.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
255
+ "model.layers.24.self_attn.o_proj.biases": "model-00004-of-00004.safetensors",
256
+ "model.layers.24.self_attn.o_proj.scales": "model-00004-of-00004.safetensors",
257
+ "model.layers.24.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
258
+ "model.layers.24.self_attn.qkv_proj.biases": "model-00004-of-00004.safetensors",
259
+ "model.layers.24.self_attn.qkv_proj.scales": "model-00004-of-00004.safetensors",
260
+ "model.layers.24.self_attn.qkv_proj.weight": "model-00004-of-00004.safetensors",
261
+ "model.layers.25.input_layernorm.weight": "model-00004-of-00004.safetensors",
262
+ "model.layers.25.mlp.down_proj.biases": "model-00004-of-00004.safetensors",
263
+ "model.layers.25.mlp.down_proj.scales": "model-00004-of-00004.safetensors",
264
+ "model.layers.25.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
265
+ "model.layers.25.mlp.gate_up_proj.biases": "model-00004-of-00004.safetensors",
266
+ "model.layers.25.mlp.gate_up_proj.scales": "model-00004-of-00004.safetensors",
267
+ "model.layers.25.mlp.gate_up_proj.weight": "model-00004-of-00004.safetensors",
268
+ "model.layers.25.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
269
+ "model.layers.25.self_attn.o_proj.biases": "model-00004-of-00004.safetensors",
270
+ "model.layers.25.self_attn.o_proj.scales": "model-00004-of-00004.safetensors",
271
+ "model.layers.25.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
272
+ "model.layers.25.self_attn.qkv_proj.biases": "model-00004-of-00004.safetensors",
273
+ "model.layers.25.self_attn.qkv_proj.scales": "model-00004-of-00004.safetensors",
274
+ "model.layers.25.self_attn.qkv_proj.weight": "model-00004-of-00004.safetensors",
275
+ "model.layers.26.input_layernorm.weight": "model-00004-of-00004.safetensors",
276
+ "model.layers.26.mlp.down_proj.biases": "model-00004-of-00004.safetensors",
277
+ "model.layers.26.mlp.down_proj.scales": "model-00004-of-00004.safetensors",
278
+ "model.layers.26.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
279
+ "model.layers.26.mlp.gate_up_proj.biases": "model-00004-of-00004.safetensors",
280
+ "model.layers.26.mlp.gate_up_proj.scales": "model-00004-of-00004.safetensors",
281
+ "model.layers.26.mlp.gate_up_proj.weight": "model-00004-of-00004.safetensors",
282
+ "model.layers.26.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
283
+ "model.layers.26.self_attn.o_proj.biases": "model-00004-of-00004.safetensors",
284
+ "model.layers.26.self_attn.o_proj.scales": "model-00004-of-00004.safetensors",
285
+ "model.layers.26.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
286
+ "model.layers.26.self_attn.qkv_proj.biases": "model-00004-of-00004.safetensors",
287
+ "model.layers.26.self_attn.qkv_proj.scales": "model-00004-of-00004.safetensors",
288
+ "model.layers.26.self_attn.qkv_proj.weight": "model-00004-of-00004.safetensors",
289
+ "model.layers.27.input_layernorm.weight": "model-00004-of-00004.safetensors",
290
+ "model.layers.27.mlp.down_proj.biases": "model-00004-of-00004.safetensors",
291
+ "model.layers.27.mlp.down_proj.scales": "model-00004-of-00004.safetensors",
292
+ "model.layers.27.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
293
+ "model.layers.27.mlp.gate_up_proj.biases": "model-00004-of-00004.safetensors",
294
+ "model.layers.27.mlp.gate_up_proj.scales": "model-00004-of-00004.safetensors",
295
+ "model.layers.27.mlp.gate_up_proj.weight": "model-00004-of-00004.safetensors",
296
+ "model.layers.27.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
297
+ "model.layers.27.self_attn.o_proj.biases": "model-00004-of-00004.safetensors",
298
+ "model.layers.27.self_attn.o_proj.scales": "model-00004-of-00004.safetensors",
299
+ "model.layers.27.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
300
+ "model.layers.27.self_attn.qkv_proj.biases": "model-00004-of-00004.safetensors",
301
+ "model.layers.27.self_attn.qkv_proj.scales": "model-00004-of-00004.safetensors",
302
+ "model.layers.27.self_attn.qkv_proj.weight": "model-00004-of-00004.safetensors",
303
+ "model.layers.28.input_layernorm.weight": "model-00004-of-00004.safetensors",
304
+ "model.layers.28.mlp.down_proj.biases": "model-00004-of-00004.safetensors",
305
+ "model.layers.28.mlp.down_proj.scales": "model-00004-of-00004.safetensors",
306
+ "model.layers.28.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
307
+ "model.layers.28.mlp.gate_up_proj.biases": "model-00004-of-00004.safetensors",
308
+ "model.layers.28.mlp.gate_up_proj.scales": "model-00004-of-00004.safetensors",
309
+ "model.layers.28.mlp.gate_up_proj.weight": "model-00004-of-00004.safetensors",
310
+ "model.layers.28.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
311
+ "model.layers.28.self_attn.o_proj.biases": "model-00004-of-00004.safetensors",
312
+ "model.layers.28.self_attn.o_proj.scales": "model-00004-of-00004.safetensors",
313
+ "model.layers.28.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
314
+ "model.layers.28.self_attn.qkv_proj.biases": "model-00004-of-00004.safetensors",
315
+ "model.layers.28.self_attn.qkv_proj.scales": "model-00004-of-00004.safetensors",
316
+ "model.layers.28.self_attn.qkv_proj.weight": "model-00004-of-00004.safetensors",
317
+ "model.layers.29.input_layernorm.weight": "model-00004-of-00004.safetensors",
318
+ "model.layers.29.mlp.down_proj.biases": "model-00004-of-00004.safetensors",
319
+ "model.layers.29.mlp.down_proj.scales": "model-00004-of-00004.safetensors",
320
+ "model.layers.29.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
321
+ "model.layers.29.mlp.gate_up_proj.biases": "model-00004-of-00004.safetensors",
322
+ "model.layers.29.mlp.gate_up_proj.scales": "model-00004-of-00004.safetensors",
323
+ "model.layers.29.mlp.gate_up_proj.weight": "model-00004-of-00004.safetensors",
324
+ "model.layers.29.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
325
+ "model.layers.29.self_attn.o_proj.biases": "model-00004-of-00004.safetensors",
326
+ "model.layers.29.self_attn.o_proj.scales": "model-00004-of-00004.safetensors",
327
+ "model.layers.29.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
328
+ "model.layers.29.self_attn.qkv_proj.biases": "model-00004-of-00004.safetensors",
329
+ "model.layers.29.self_attn.qkv_proj.scales": "model-00004-of-00004.safetensors",
330
+ "model.layers.29.self_attn.qkv_proj.weight": "model-00004-of-00004.safetensors",
331
+ "model.layers.3.input_layernorm.weight": "model-00002-of-00004.safetensors",
332
+ "model.layers.3.mlp.down_proj.biases": "model-00002-of-00004.safetensors",
333
+ "model.layers.3.mlp.down_proj.scales": "model-00002-of-00004.safetensors",
334
+ "model.layers.3.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
335
+ "model.layers.3.mlp.gate_up_proj.biases": "model-00001-of-00004.safetensors",
336
+ "model.layers.3.mlp.gate_up_proj.scales": "model-00001-of-00004.safetensors",
337
+ "model.layers.3.mlp.gate_up_proj.weight": "model-00001-of-00004.safetensors",
338
+ "model.layers.3.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
339
+ "model.layers.3.self_attn.o_proj.biases": "model-00001-of-00004.safetensors",
340
+ "model.layers.3.self_attn.o_proj.scales": "model-00001-of-00004.safetensors",
341
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00004.safetensors",
342
+ "model.layers.3.self_attn.qkv_proj.biases": "model-00001-of-00004.safetensors",
343
+ "model.layers.3.self_attn.qkv_proj.scales": "model-00001-of-00004.safetensors",
344
+ "model.layers.3.self_attn.qkv_proj.weight": "model-00001-of-00004.safetensors",
345
+ "model.layers.30.input_layernorm.weight": "model-00004-of-00004.safetensors",
346
+ "model.layers.30.mlp.down_proj.biases": "model-00004-of-00004.safetensors",
347
+ "model.layers.30.mlp.down_proj.scales": "model-00004-of-00004.safetensors",
348
+ "model.layers.30.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
349
+ "model.layers.30.mlp.gate_up_proj.biases": "model-00004-of-00004.safetensors",
350
+ "model.layers.30.mlp.gate_up_proj.scales": "model-00004-of-00004.safetensors",
351
+ "model.layers.30.mlp.gate_up_proj.weight": "model-00004-of-00004.safetensors",
352
+ "model.layers.30.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
353
+ "model.layers.30.self_attn.o_proj.biases": "model-00004-of-00004.safetensors",
354
+ "model.layers.30.self_attn.o_proj.scales": "model-00004-of-00004.safetensors",
355
+ "model.layers.30.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
356
+ "model.layers.30.self_attn.qkv_proj.biases": "model-00004-of-00004.safetensors",
357
+ "model.layers.30.self_attn.qkv_proj.scales": "model-00004-of-00004.safetensors",
358
+ "model.layers.30.self_attn.qkv_proj.weight": "model-00004-of-00004.safetensors",
359
+ "model.layers.31.input_layernorm.weight": "model-00004-of-00004.safetensors",
360
+ "model.layers.31.mlp.down_proj.biases": "model-00004-of-00004.safetensors",
361
+ "model.layers.31.mlp.down_proj.scales": "model-00004-of-00004.safetensors",
362
+ "model.layers.31.mlp.down_proj.weight": "model-00004-of-00004.safetensors",
363
+ "model.layers.31.mlp.gate_up_proj.biases": "model-00004-of-00004.safetensors",
364
+ "model.layers.31.mlp.gate_up_proj.scales": "model-00004-of-00004.safetensors",
365
+ "model.layers.31.mlp.gate_up_proj.weight": "model-00004-of-00004.safetensors",
366
+ "model.layers.31.post_attention_layernorm.weight": "model-00004-of-00004.safetensors",
367
+ "model.layers.31.self_attn.o_proj.biases": "model-00004-of-00004.safetensors",
368
+ "model.layers.31.self_attn.o_proj.scales": "model-00004-of-00004.safetensors",
369
+ "model.layers.31.self_attn.o_proj.weight": "model-00004-of-00004.safetensors",
370
+ "model.layers.31.self_attn.qkv_proj.biases": "model-00004-of-00004.safetensors",
371
+ "model.layers.31.self_attn.qkv_proj.scales": "model-00004-of-00004.safetensors",
372
+ "model.layers.31.self_attn.qkv_proj.weight": "model-00004-of-00004.safetensors",
373
+ "model.layers.4.input_layernorm.weight": "model-00002-of-00004.safetensors",
374
+ "model.layers.4.mlp.down_proj.biases": "model-00002-of-00004.safetensors",
375
+ "model.layers.4.mlp.down_proj.scales": "model-00002-of-00004.safetensors",
376
+ "model.layers.4.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
377
+ "model.layers.4.mlp.gate_up_proj.biases": "model-00002-of-00004.safetensors",
378
+ "model.layers.4.mlp.gate_up_proj.scales": "model-00002-of-00004.safetensors",
379
+ "model.layers.4.mlp.gate_up_proj.weight": "model-00002-of-00004.safetensors",
380
+ "model.layers.4.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
381
+ "model.layers.4.self_attn.o_proj.biases": "model-00002-of-00004.safetensors",
382
+ "model.layers.4.self_attn.o_proj.scales": "model-00002-of-00004.safetensors",
383
+ "model.layers.4.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
384
+ "model.layers.4.self_attn.qkv_proj.biases": "model-00002-of-00004.safetensors",
385
+ "model.layers.4.self_attn.qkv_proj.scales": "model-00002-of-00004.safetensors",
386
+ "model.layers.4.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
387
+ "model.layers.5.input_layernorm.weight": "model-00002-of-00004.safetensors",
388
+ "model.layers.5.mlp.down_proj.biases": "model-00002-of-00004.safetensors",
389
+ "model.layers.5.mlp.down_proj.scales": "model-00002-of-00004.safetensors",
390
+ "model.layers.5.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
391
+ "model.layers.5.mlp.gate_up_proj.biases": "model-00002-of-00004.safetensors",
392
+ "model.layers.5.mlp.gate_up_proj.scales": "model-00002-of-00004.safetensors",
393
+ "model.layers.5.mlp.gate_up_proj.weight": "model-00002-of-00004.safetensors",
394
+ "model.layers.5.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
395
+ "model.layers.5.self_attn.o_proj.biases": "model-00002-of-00004.safetensors",
396
+ "model.layers.5.self_attn.o_proj.scales": "model-00002-of-00004.safetensors",
397
+ "model.layers.5.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
398
+ "model.layers.5.self_attn.qkv_proj.biases": "model-00002-of-00004.safetensors",
399
+ "model.layers.5.self_attn.qkv_proj.scales": "model-00002-of-00004.safetensors",
400
+ "model.layers.5.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
401
+ "model.layers.6.input_layernorm.weight": "model-00002-of-00004.safetensors",
402
+ "model.layers.6.mlp.down_proj.biases": "model-00002-of-00004.safetensors",
403
+ "model.layers.6.mlp.down_proj.scales": "model-00002-of-00004.safetensors",
404
+ "model.layers.6.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
405
+ "model.layers.6.mlp.gate_up_proj.biases": "model-00002-of-00004.safetensors",
406
+ "model.layers.6.mlp.gate_up_proj.scales": "model-00002-of-00004.safetensors",
407
+ "model.layers.6.mlp.gate_up_proj.weight": "model-00002-of-00004.safetensors",
408
+ "model.layers.6.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
409
+ "model.layers.6.self_attn.o_proj.biases": "model-00002-of-00004.safetensors",
410
+ "model.layers.6.self_attn.o_proj.scales": "model-00002-of-00004.safetensors",
411
+ "model.layers.6.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
412
+ "model.layers.6.self_attn.qkv_proj.biases": "model-00002-of-00004.safetensors",
413
+ "model.layers.6.self_attn.qkv_proj.scales": "model-00002-of-00004.safetensors",
414
+ "model.layers.6.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
415
+ "model.layers.7.input_layernorm.weight": "model-00002-of-00004.safetensors",
416
+ "model.layers.7.mlp.down_proj.biases": "model-00002-of-00004.safetensors",
417
+ "model.layers.7.mlp.down_proj.scales": "model-00002-of-00004.safetensors",
418
+ "model.layers.7.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
419
+ "model.layers.7.mlp.gate_up_proj.biases": "model-00002-of-00004.safetensors",
420
+ "model.layers.7.mlp.gate_up_proj.scales": "model-00002-of-00004.safetensors",
421
+ "model.layers.7.mlp.gate_up_proj.weight": "model-00002-of-00004.safetensors",
422
+ "model.layers.7.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
423
+ "model.layers.7.self_attn.o_proj.biases": "model-00002-of-00004.safetensors",
424
+ "model.layers.7.self_attn.o_proj.scales": "model-00002-of-00004.safetensors",
425
+ "model.layers.7.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
426
+ "model.layers.7.self_attn.qkv_proj.biases": "model-00002-of-00004.safetensors",
427
+ "model.layers.7.self_attn.qkv_proj.scales": "model-00002-of-00004.safetensors",
428
+ "model.layers.7.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
429
+ "model.layers.8.input_layernorm.weight": "model-00002-of-00004.safetensors",
430
+ "model.layers.8.mlp.down_proj.biases": "model-00002-of-00004.safetensors",
431
+ "model.layers.8.mlp.down_proj.scales": "model-00002-of-00004.safetensors",
432
+ "model.layers.8.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
433
+ "model.layers.8.mlp.gate_up_proj.biases": "model-00002-of-00004.safetensors",
434
+ "model.layers.8.mlp.gate_up_proj.scales": "model-00002-of-00004.safetensors",
435
+ "model.layers.8.mlp.gate_up_proj.weight": "model-00002-of-00004.safetensors",
436
+ "model.layers.8.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
437
+ "model.layers.8.self_attn.o_proj.biases": "model-00002-of-00004.safetensors",
438
+ "model.layers.8.self_attn.o_proj.scales": "model-00002-of-00004.safetensors",
439
+ "model.layers.8.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
440
+ "model.layers.8.self_attn.qkv_proj.biases": "model-00002-of-00004.safetensors",
441
+ "model.layers.8.self_attn.qkv_proj.scales": "model-00002-of-00004.safetensors",
442
+ "model.layers.8.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
443
+ "model.layers.9.input_layernorm.weight": "model-00002-of-00004.safetensors",
444
+ "model.layers.9.mlp.down_proj.biases": "model-00002-of-00004.safetensors",
445
+ "model.layers.9.mlp.down_proj.scales": "model-00002-of-00004.safetensors",
446
+ "model.layers.9.mlp.down_proj.weight": "model-00002-of-00004.safetensors",
447
+ "model.layers.9.mlp.gate_up_proj.biases": "model-00002-of-00004.safetensors",
448
+ "model.layers.9.mlp.gate_up_proj.scales": "model-00002-of-00004.safetensors",
449
+ "model.layers.9.mlp.gate_up_proj.weight": "model-00002-of-00004.safetensors",
450
+ "model.layers.9.post_attention_layernorm.weight": "model-00002-of-00004.safetensors",
451
+ "model.layers.9.self_attn.o_proj.biases": "model-00002-of-00004.safetensors",
452
+ "model.layers.9.self_attn.o_proj.scales": "model-00002-of-00004.safetensors",
453
+ "model.layers.9.self_attn.o_proj.weight": "model-00002-of-00004.safetensors",
454
+ "model.layers.9.self_attn.qkv_proj.biases": "model-00002-of-00004.safetensors",
455
+ "model.layers.9.self_attn.qkv_proj.scales": "model-00002-of-00004.safetensors",
456
+ "model.layers.9.self_attn.qkv_proj.weight": "model-00002-of-00004.safetensors",
457
+ "model.norm.weight": "model-00004-of-00004.safetensors"
458
+ }
459
+ }
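The `weight_map` above tells a loader which of the four shards holds each tensor. A minimal sketch of inspecting it, assuming the index file has been downloaded locally:

```python
import json
from collections import Counter

with open("model.safetensors.index.json") as f:
    index = json.load(f)

# Count how many tensors live in each shard.
for shard, n_tensors in sorted(Counter(index["weight_map"].values()).items()):
    print(f"{shard}: {n_tensors} tensors")

# metadata.total_size is the combined size in bytes of all tensors across the shards.
print(index["metadata"]["total_size"])
```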
modeling_phi3.py ADDED
@@ -0,0 +1,1180 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ """PyTorch Phi-3 model."""
17
+
18
+ from typing import Callable, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ from torch import nn
22
+
23
+ from transformers.activations import ACT2FN
24
+ from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
25
+ from transformers.generation import GenerationMixin
26
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
27
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
28
+ from transformers.modeling_outputs import (
29
+ BaseModelOutputWithPast,
30
+ CausalLMOutputWithPast,
31
+ SequenceClassifierOutputWithPast,
32
+ TokenClassifierOutput,
33
+ )
34
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
35
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
36
+ from transformers.processing_utils import Unpack
37
+ from transformers.utils import (
38
+ LossKwargs,
39
+ add_code_sample_docstrings,
40
+ add_start_docstrings,
41
+ add_start_docstrings_to_model_forward,
42
+ logging,
43
+ replace_return_docstrings,
44
+ )
45
+ from transformers.utils.deprecation import deprecate_kwarg
46
+ from .configuration_phi3 import Phi3Config
47
+
48
+
49
+ logger = logging.get_logger(__name__)
50
+
51
+ _CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
52
+ _CONFIG_FOR_DOC = "Phi3Config"
53
+
54
+
55
+ class Phi3MLP(nn.Module):
56
+ def __init__(self, config):
57
+ super().__init__()
58
+
59
+ self.config = config
60
+ self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
61
+ self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
62
+ self.activation_fn = ACT2FN[config.hidden_act]
63
+
64
+ def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
65
+ up_states = self.gate_up_proj(hidden_states)
66
+
67
+ gate, up_states = up_states.chunk(2, dim=-1)
68
+ up_states = up_states * self.activation_fn(gate)
69
+
70
+ return self.down_proj(up_states)
71
+
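# A minimal illustrative sketch (toy sizes, silu activation assumed) of the fused
# gate_up_proj above: one matmul produces both halves of the SwiGLU-style MLP,
# chunk(2, dim=-1) splits them, and the output is down_proj(up * silu(gate)).
import torch
import torch.nn.functional as F

x = torch.randn(1, 4, 8)                                     # (batch, seq, hidden_size=8)
gate_up = torch.nn.Linear(8, 2 * 32, bias=False)(x)          # intermediate_size=32 -> width 64
gate, up = gate_up.chunk(2, dim=-1)                          # two (1, 4, 32) tensors
out = torch.nn.Linear(32, 8, bias=False)(up * F.silu(gate))  # back to (1, 4, 8)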
72
+
73
+ def rotate_half(x):
74
+ """Rotates half the hidden dims of the input."""
75
+ x1 = x[..., : x.shape[-1] // 2]
76
+ x2 = x[..., x.shape[-1] // 2 :]
77
+ return torch.cat((-x2, x1), dim=-1)
78
+
79
+
80
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
81
+ """
82
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
83
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
84
+ """
85
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
86
+ if n_rep == 1:
87
+ return hidden_states
88
+ hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
89
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
90
+
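# A small illustrative sketch of the grouped-query expansion performed by repeat_kv:
# each key/value head is repeated n_rep times so the KV tensors match the number of
# query heads before the attention matmul (toy shapes below).
import torch

kv = torch.randn(2, 8, 5, 16)        # (batch, num_key_value_heads=8, seq, head_dim)
n_rep = 3                            # e.g. 24 query heads / 8 KV heads
expanded = kv[:, :, None, :, :].expand(2, 8, n_rep, 5, 16).reshape(2, 8 * n_rep, 5, 16)
assert expanded.shape == (2, 24, 5, 16)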
91
+
92
+ def eager_attention_forward(
93
+ module: nn.Module,
94
+ query: torch.Tensor,
95
+ key: torch.Tensor,
96
+ value: torch.Tensor,
97
+ attention_mask: Optional[torch.Tensor],
98
+ scaling: float,
99
+ dropout: float = 0.0,
100
+ **kwargs,
101
+ ):
102
+ key_states = repeat_kv(key, module.num_key_value_groups)
103
+ value_states = repeat_kv(value, module.num_key_value_groups)
104
+
105
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
106
+ if attention_mask is not None:
107
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
108
+ attn_weights = attn_weights + causal_mask
109
+
110
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
111
+ attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
112
+ attn_output = torch.matmul(attn_weights, value_states)
113
+ attn_output = attn_output.transpose(1, 2).contiguous()
114
+
115
+ return attn_output, attn_weights
116
+
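# A toy sketch of the eager attention path above: scaled dot-product scores, an additive
# causal mask, float32 softmax, then a weighted sum over values (shapes are illustrative).
import torch

q = torch.randn(1, 4, 6, 16)                     # (batch, heads, q_len, head_dim)
k = torch.randn(1, 4, 6, 16)
v = torch.randn(1, 4, 6, 16)
scores = q @ k.transpose(2, 3) * (16 ** -0.5)    # (1, 4, 6, 6)
mask = torch.triu(torch.full((6, 6), float("-inf")), diagonal=1)
probs = torch.softmax((scores + mask).float(), dim=-1).to(q.dtype)
out = (probs @ v).transpose(1, 2)                # (1, 6, 4, 16), as returned above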
117
+
118
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
119
+ """Applies Rotary Position Embedding to the query and key tensors.
120
+
121
+ Args:
122
+ q (`torch.Tensor`): The query tensor.
123
+ k (`torch.Tensor`): The key tensor.
124
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
125
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
126
+ position_ids (`torch.Tensor`, *optional*):
127
+ Deprecated and unused.
128
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
129
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
130
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
131
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
132
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
133
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
134
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
135
+ Returns:
136
+ `tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
137
+ """
138
+ cos = cos.unsqueeze(unsqueeze_dim)
139
+ sin = sin.unsqueeze(unsqueeze_dim)
140
+
141
+ rotary_dim = cos.shape[-1]
142
+ q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
143
+ k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
144
+
145
+ q_embed = torch.cat([(q_rot * cos) + (rotate_half(q_rot) * sin), q_pass], dim=-1)
146
+ k_embed = torch.cat([(k_rot * cos) + (rotate_half(k_rot) * sin), k_pass], dim=-1)
147
+ return q_embed, k_embed
148
+
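# A hedged sketch of the partial-rotary split above: only the first rotary_dim channels of
# each head are rotated, the remainder passes through unchanged. With illustrative values
# of head_dim=128 and a partial rotary factor of 0.75, cos/sin have width 96 and the last
# 32 channels of q and k are concatenated back unrotated.
import torch

head_dim, rotary_dim = 128, 96                      # assumed, for illustration only
q = torch.randn(1, 24, 10, head_dim)                # (batch, heads, seq, head_dim)
q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
assert q_rot.shape[-1] == 96 and q_pass.shape[-1] == 32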
149
+
150
+ class Phi3Attention(nn.Module):
151
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
152
+
153
+ def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
154
+ super().__init__()
155
+ self.config = config
156
+ self.layer_idx = layer_idx
157
+ self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
158
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
159
+ self.num_key_value_heads = config.num_key_value_heads
160
+ self.scaling = self.head_dim**-0.5
161
+ self.attention_dropout = config.attention_dropout
162
+ self.is_causal = True
163
+
164
+ op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
165
+ self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
166
+ self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)
167
+
168
+ def forward(
169
+ self,
170
+ hidden_states: torch.Tensor,
171
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
172
+ attention_mask: Optional[torch.Tensor],
173
+ past_key_value: Optional[Cache] = None,
174
+ cache_position: Optional[torch.LongTensor] = None,
175
+ **kwargs: Unpack[FlashAttentionKwargs],
176
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
177
+ input_shape = hidden_states.shape[:-1]
178
+ hidden_shape = (*input_shape, -1, self.head_dim)
179
+
180
+ qkv = self.qkv_proj(hidden_states)
181
+ query_pos = self.config.num_attention_heads * self.head_dim
182
+ query_states = qkv[..., :query_pos]
183
+ key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
184
+ value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
185
+
186
+ query_states = query_states.view(hidden_shape).transpose(1, 2)
187
+ key_states = key_states.view(hidden_shape).transpose(1, 2)
188
+ value_states = value_states.view(hidden_shape).transpose(1, 2)
189
+
190
+ cos, sin = position_embeddings
191
+ query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
192
+
193
+ if past_key_value is not None:
194
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
195
+ cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
196
+ key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
197
+
198
+ attention_interface: Callable = eager_attention_forward
199
+ if self.config._attn_implementation != "eager":
200
+ if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
201
+ logger.warning_once(
202
+ "`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
203
+ 'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
204
+ )
205
+ else:
206
+ attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
207
+
208
+ attn_output, attn_weights = attention_interface(
209
+ self,
210
+ query_states,
211
+ key_states,
212
+ value_states,
213
+ attention_mask,
214
+ dropout=0.0 if not self.training else self.attention_dropout,
215
+ scaling=self.scaling,
216
+ sliding_window=getattr(self.config, "sliding_window", None),
217
+ **kwargs,
218
+ )
219
+
220
+ attn_output = attn_output.reshape(*input_shape, -1).contiguous()
221
+ attn_output = self.o_proj(attn_output)
222
+ return attn_output, attn_weights
223
+
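# Sketch (illustrative sizes) of how the fused qkv_proj output is sliced in the forward
# above: the first num_attention_heads*head_dim columns are queries, followed by
# num_key_value_heads*head_dim columns each for keys and values.
import torch

n_heads, n_kv_heads, head_dim = 24, 8, 128                  # assumed example values
op_size = n_heads * head_dim + 2 * n_kv_heads * head_dim    # 3072 + 2 * 1024 = 5120
qkv = torch.randn(1, 10, op_size)
query_pos = n_heads * head_dim
q = qkv[..., :query_pos]                                    # (1, 10, 3072)
k = qkv[..., query_pos : query_pos + n_kv_heads * head_dim] # (1, 10, 1024)
v = qkv[..., query_pos + n_kv_heads * head_dim :]           # (1, 10, 1024)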
224
+
225
+ class Phi3RMSNorm(nn.Module):
226
+ def __init__(self, hidden_size, eps=1e-6):
227
+ """
228
+ Phi3RMSNorm is equivalent to T5LayerNorm
229
+ """
230
+ super().__init__()
231
+ self.weight = nn.Parameter(torch.ones(hidden_size))
232
+ self.variance_epsilon = eps
233
+
234
+ def forward(self, hidden_states):
235
+ input_dtype = hidden_states.dtype
236
+ hidden_states = hidden_states.to(torch.float32)
237
+ variance = hidden_states.pow(2).mean(-1, keepdim=True)
238
+ hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
239
+ return self.weight * hidden_states.to(input_dtype)
240
+
241
+ def extra_repr(self):
242
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
243
+
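# A minimal sketch of the RMSNorm computation above: scale each hidden vector by the
# reciprocal root-mean-square of its elements (computed in float32), then apply the
# learned per-channel weight (the eps value below is assumed for illustration).
import torch

x = torch.randn(2, 5, 8)
weight = torch.ones(8)
variance = x.float().pow(2).mean(-1, keepdim=True)
normed = weight * (x.float() * torch.rsqrt(variance + 1e-5)).to(x.dtype)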
244
+
245
+ class Phi3DecoderLayer(nn.Module):
246
+ def __init__(self, config: Phi3Config, layer_idx: int):
247
+ super().__init__()
248
+ self.hidden_size = config.hidden_size
249
+ self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)
250
+ self.mlp = Phi3MLP(config)
251
+ self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
252
+ self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
253
+ self.config = config
254
+ self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
255
+ self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
256
+
257
+ def forward(
258
+ self,
259
+ hidden_states: torch.Tensor,
260
+ attention_mask: Optional[torch.Tensor] = None,
261
+ position_ids: Optional[torch.LongTensor] = None,
262
+ past_key_value: Optional[Cache] = None,
263
+ output_attentions: Optional[bool] = False,
264
+ use_cache: Optional[bool] = False,
265
+ cache_position: Optional[torch.LongTensor] = None,
266
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
267
+ **kwargs: Unpack[FlashAttentionKwargs],
268
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
269
+ """
270
+ Args:
271
+ hidden_states (`torch.FloatTensor`):
272
+ input to the layer of shape `(batch, seq_len, embed_dim)`
273
+ attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
274
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
275
+ position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
276
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
277
+ `[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
278
+ past_key_value (`Cache`, *optional*): cached past key and value projection states
279
+ output_attentions (`bool`, *optional*):
280
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
281
+ returned tensors for more detail.
282
+ use_cache (`bool`, *optional*):
283
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
284
+ (see `past_key_values`).
285
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
286
+ Indices depicting the position of the input sequence tokens in the sequence
287
+ kwargs (`dict`, *optional*):
288
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
289
+ into the model
290
+ """
291
+ residual = hidden_states
292
+
293
+ hidden_states = self.input_layernorm(hidden_states)
294
+
295
+ # Self Attention
296
+ hidden_states, self_attn_weights = self.self_attn(
297
+ hidden_states=hidden_states,
298
+ attention_mask=attention_mask,
299
+ position_ids=position_ids,
300
+ past_key_value=past_key_value,
301
+ output_attentions=output_attentions,
302
+ use_cache=use_cache,
303
+ cache_position=cache_position,
304
+ position_embeddings=position_embeddings,
305
+ **kwargs,
306
+ )
307
+ hidden_states = residual + self.resid_attn_dropout(hidden_states) # main diff with Llama
308
+
309
+ residual = hidden_states
310
+ hidden_states = self.post_attention_layernorm(hidden_states)
311
+ hidden_states = self.mlp(hidden_states)
312
+ hidden_states = residual + self.resid_mlp_dropout(hidden_states) # main diff with Llama
313
+
314
+ outputs = (hidden_states,)
315
+ if output_attentions:
316
+ outputs += (self_attn_weights,)
317
+
318
+ return outputs
319
+
320
+
321
+ class Phi3RotaryEmbedding(nn.Module):
322
+ def __init__(self, config: Phi3Config, device=None):
323
+ super().__init__()
324
+ # BC: "rope_type" was originally "type"
325
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
326
+ self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
327
+ else:
328
+ self.rope_type = "default"
329
+ self.max_seq_len_cached = config.max_position_embeddings
330
+ self.original_max_seq_len = config.max_position_embeddings
331
+
332
+ self.config = config
333
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
334
+
335
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
336
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
337
+ self.original_inv_freq = self.inv_freq
338
+
339
+ def _dynamic_frequency_update(self, position_ids, device):
340
+ """
341
+ dynamic RoPE layers should recompute `inv_freq` in the following situations:
342
+ 1 - growing beyond the cached sequence length (allow scaling)
343
+ 2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
344
+ """
345
+ seq_len = torch.max(position_ids) + 1
346
+ if seq_len > self.max_seq_len_cached: # growth
347
+ inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
348
+ self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
349
+ self.max_seq_len_cached = seq_len
350
+
351
+ if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
352
+ # This .to() is needed if the model has been moved to a device after being initialized (because
353
+ # the buffer is automatically moved, but not the original copy)
354
+ self.original_inv_freq = self.original_inv_freq.to(device)
355
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
356
+ self.max_seq_len_cached = self.original_max_seq_len
357
+
358
+ @torch.no_grad()
359
+ def forward(self, x, position_ids):
360
+ if "dynamic" in self.rope_type:
361
+ self._dynamic_frequency_update(position_ids, device=x.device)
362
+ elif self.rope_type == "longrope":
363
+ self._longrope_frequency_update(position_ids, device=x.device)
364
+
365
+ # Core RoPE block
366
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
367
+ position_ids_expanded = position_ids[:, None, :].float()
368
+ # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
369
+ device_type = x.device.type
370
+ device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
371
+ with torch.autocast(device_type=device_type, enabled=False):
372
+ freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
373
+ emb = torch.cat((freqs, freqs), dim=-1)
374
+ cos = emb.cos()
375
+ sin = emb.sin()
376
+
377
+ # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
378
+ cos = cos * self.attention_scaling
379
+ sin = sin * self.attention_scaling
380
+
381
+ return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
382
+
383
+ def _longrope_frequency_update(self, position_ids, device):
384
+ """Longrope uses long factor if sequence is larger than original pretraining length, short otherwise."""
385
+ seq_len = torch.max(position_ids) + 1
386
+ if hasattr(self.config, "original_max_position_embeddings"):
387
+ original_max_position_embeddings = self.config.original_max_position_embeddings
388
+ else:
389
+ original_max_position_embeddings = self.config.max_position_embeddings
390
+ if seq_len > original_max_position_embeddings:
391
+ if not hasattr(self, "long_inv_freq"):
392
+ self.long_inv_freq, _ = self.rope_init_fn(
393
+ self.config, device, seq_len=original_max_position_embeddings + 1
394
+ )
395
+ self.register_buffer("inv_freq", self.long_inv_freq, persistent=False)
396
+ else:
397
+ # This .to() is needed if the model has been moved to a device after being initialized (because
398
+ # the buffer is automatically moved, but not the original copy)
399
+ self.original_inv_freq = self.original_inv_freq.to(device)
400
+ self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
401
+
402
+
403
+ PHI3_START_DOCSTRING = r"""
404
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
405
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
406
+ etc.)
407
+
408
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
409
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
410
+ and behavior.
411
+
412
+ Parameters:
413
+ config ([`Phi3Config`]):
414
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
415
+ load the weights associated with the model, only the configuration. Check out the
416
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
417
+ """
418
+
419
+
420
+ @add_start_docstrings(
421
+ "The bare Phi3 Model outputting raw hidden-states without any specific head on top.",
422
+ PHI3_START_DOCSTRING,
423
+ )
424
+ class Phi3PreTrainedModel(PreTrainedModel):
425
+ config_class = Phi3Config
426
+ base_model_prefix = "model"
427
+ supports_gradient_checkpointing = True
428
+ _no_split_modules = ["Phi3DecoderLayer"]
429
+ _skip_keys_device_placement = ["past_key_values"]
430
+ _supports_flash_attn_2 = True
431
+ _supports_sdpa = True
432
+ _supports_flex_attn = True
433
+ _supports_cache_class = True
434
+ _supports_quantized_cache = True
435
+ _supports_static_cache = True
436
+ _supports_attention_backend = True
437
+ _version = "0.0.5"
438
+
439
+ def _init_weights(self, module):
440
+ std = self.config.initializer_range
441
+ if isinstance(module, nn.Linear):
442
+ module.weight.data.normal_(mean=0.0, std=std)
443
+ if module.bias is not None:
444
+ module.bias.data.zero_()
445
+ elif isinstance(module, nn.Embedding):
446
+ module.weight.data.normal_(mean=0.0, std=std)
447
+ if module.padding_idx is not None:
448
+ module.weight.data[module.padding_idx].zero_()
449
+
450
+
451
+ PHI3_INPUTS_DOCSTRING = r"""
452
+ Args:
453
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
454
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
455
+ it.
456
+
457
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
458
+ [`PreTrainedTokenizer.__call__`] for details.
459
+
460
+ [What are input IDs?](../glossary#input-ids)
461
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
462
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
463
+
464
+ - 1 for tokens that are **not masked**,
465
+ - 0 for tokens that are **masked**.
466
+
467
+ [What are attention masks?](../glossary#attention-mask)
468
+
469
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
470
+ [`PreTrainedTokenizer.__call__`] for details.
471
+
472
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
473
+ `past_key_values`).
474
+
475
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
476
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
477
+ information on the default strategy.
478
+
479
+ - 1 indicates the head is **not masked**,
480
+ - 0 indicates the head is **masked**.
481
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
482
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
483
+ config.n_positions - 1]`.
484
+
485
+ [What are position IDs?](../glossary#position-ids)
486
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
487
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
488
+ blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
489
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
490
+
491
+ Two formats are allowed:
492
+ - a [`~cache_utils.Cache`] instance, see our
493
+ [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
494
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
495
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
496
+ cache format.
497
+
498
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
499
+ legacy cache format will be returned.
500
+
501
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
502
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
503
+ of shape `(batch_size, sequence_length)`.
504
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
505
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
506
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
507
+ model's internal embedding lookup matrix.
508
+ use_cache (`bool`, *optional*):
509
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
510
+ `past_key_values`).
511
+ output_attentions (`bool`, *optional*):
512
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
513
+ tensors for more detail.
514
+ output_hidden_states (`bool`, *optional*):
515
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
516
+ more detail.
517
+ return_dict (`bool`, *optional*):
518
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
519
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
520
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
521
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
522
+ the complete sequence length.
523
+ """
524
+
525
+
526
+ @add_start_docstrings(
527
+ "The bare Phi3 Model outputting raw hidden-states without any specific head on top.",
528
+ PHI3_START_DOCSTRING,
529
+ )
530
+ class Phi3Model(Phi3PreTrainedModel):
531
+ """
532
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]
533
+
534
+ Args:
535
+ config: Phi3Config
536
+ """
537
+
538
+ def __init__(self, config: Phi3Config):
539
+ super().__init__(config)
540
+ self.padding_idx = config.pad_token_id
541
+ self.vocab_size = config.vocab_size
542
+
543
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
544
+ self.layers = nn.ModuleList(
545
+ [Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
546
+ )
547
+ self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
548
+ self.rotary_emb = Phi3RotaryEmbedding(config=config)
549
+ self.gradient_checkpointing = False
550
+
551
+ # Initialize weights and apply final processing
552
+ self.post_init()
553
+
554
+ def get_input_embeddings(self):
555
+ return self.embed_tokens
556
+
557
+ def set_input_embeddings(self, value):
558
+ self.embed_tokens = value
559
+
560
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
561
+ def forward(
562
+ self,
563
+ input_ids: torch.LongTensor = None,
564
+ attention_mask: Optional[torch.Tensor] = None,
565
+ position_ids: Optional[torch.LongTensor] = None,
566
+ past_key_values: Optional[Cache] = None,
567
+ inputs_embeds: Optional[torch.FloatTensor] = None,
568
+ use_cache: Optional[bool] = None,
569
+ output_attentions: Optional[bool] = None,
570
+ output_hidden_states: Optional[bool] = None,
571
+ return_dict: Optional[bool] = None,
572
+ cache_position: Optional[torch.LongTensor] = None,
573
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
574
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
575
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
576
+ output_hidden_states = (
577
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
578
+ )
579
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
580
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
581
+
582
+ if (input_ids is None) ^ (inputs_embeds is not None):
583
+ raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
584
+
585
+ if self.gradient_checkpointing and self.training and use_cache:
586
+ logger.warning_once(
587
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
588
+ )
589
+ use_cache = False
590
+
591
+ if inputs_embeds is None:
592
+ inputs_embeds = self.embed_tokens(input_ids)
593
+
594
+ if use_cache and past_key_values is None:
595
+ past_key_values = DynamicCache()
596
+
597
+ if cache_position is None:
598
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
599
+ cache_position = torch.arange(
600
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
601
+ )
602
+
603
+ if position_ids is None:
604
+ position_ids = cache_position.unsqueeze(0)
605
+
606
+ causal_mask = self._update_causal_mask(
607
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
608
+ )
609
+
610
+ hidden_states = inputs_embeds
611
+
612
+ # create position embeddings to be shared across the decoder layers
613
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
614
+
615
+ # decoder layers
616
+ all_hidden_states = () if output_hidden_states else None
617
+ all_self_attns = () if output_attentions else None
618
+
619
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
620
+ if output_hidden_states:
621
+ all_hidden_states += (hidden_states,)
622
+
623
+ if self.gradient_checkpointing and self.training:
624
+ layer_outputs = self._gradient_checkpointing_func(
625
+ decoder_layer.__call__,
626
+ hidden_states,
627
+ causal_mask,
628
+ position_ids,
629
+ past_key_values,
630
+ output_attentions,
631
+ use_cache,
632
+ cache_position,
633
+ position_embeddings,
634
+ )
635
+ else:
636
+ layer_outputs = decoder_layer(
637
+ hidden_states,
638
+ attention_mask=causal_mask,
639
+ position_ids=position_ids,
640
+ past_key_value=past_key_values,
641
+ output_attentions=output_attentions,
642
+ use_cache=use_cache,
643
+ cache_position=cache_position,
644
+ position_embeddings=position_embeddings,
645
+ **flash_attn_kwargs,
646
+ )
647
+
648
+ hidden_states = layer_outputs[0]
649
+
650
+ if output_attentions:
651
+ all_self_attns += (layer_outputs[1],)
652
+
653
+ hidden_states = self.norm(hidden_states)
654
+
655
+ # add hidden states from the last decoder layer
656
+ if output_hidden_states:
657
+ all_hidden_states += (hidden_states,)
658
+
659
+ output = BaseModelOutputWithPast(
660
+ last_hidden_state=hidden_states,
661
+ past_key_values=past_key_values if use_cache else None,
662
+ hidden_states=all_hidden_states,
663
+ attentions=all_self_attns,
664
+ )
665
+ return output if return_dict else output.to_tuple()
666
+
667
+ def _update_causal_mask(
668
+ self,
669
+ attention_mask: torch.Tensor,
670
+ input_tensor: torch.Tensor,
671
+ cache_position: torch.Tensor,
672
+ past_key_values: Cache,
673
+ output_attentions: bool,
674
+ ):
675
+ if self.config._attn_implementation == "flash_attention_2":
676
+ if attention_mask is not None and past_key_values is not None:
677
+ is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
678
+ if is_padding_right:
679
+ raise ValueError(
680
+ "You are attempting to perform batched generation with padding_side='right'"
681
+ " this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to "
682
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
683
+ )
684
+ if attention_mask is not None and 0.0 in attention_mask:
685
+ return attention_mask
686
+ return None
687
+
688
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
689
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
690
+ # to infer the attention mask.
691
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
692
+ using_static_cache = isinstance(past_key_values, StaticCache)
693
+ using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)
694
+
695
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
696
+ if (
697
+ self.config._attn_implementation == "sdpa"
698
+ and not (using_static_cache or using_sliding_window_cache)
699
+ and not output_attentions
700
+ ):
701
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
702
+ attention_mask,
703
+ inputs_embeds=input_tensor,
704
+ past_key_values_length=past_seen_tokens,
705
+ sliding_window=self.config.sliding_window,
706
+ is_training=self.training,
707
+ ):
708
+ return None
709
+
710
+ dtype, device = input_tensor.dtype, input_tensor.device
711
+ min_dtype = torch.finfo(dtype).min
712
+ sequence_length = input_tensor.shape[1]
713
+ # SlidingWindowCache or StaticCache
714
+ if using_sliding_window_cache or using_static_cache:
715
+ target_length = past_key_values.get_max_cache_shape()
716
+ # DynamicCache or no cache
717
+ else:
718
+ target_length = (
719
+ attention_mask.shape[-1]
720
+ if isinstance(attention_mask, torch.Tensor)
721
+ else past_seen_tokens + sequence_length + 1
722
+ )
723
+
724
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
725
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
726
+ attention_mask,
727
+ sequence_length=sequence_length,
728
+ target_length=target_length,
729
+ dtype=dtype,
730
+ device=device,
731
+ cache_position=cache_position,
732
+ batch_size=input_tensor.shape[0],
733
+ config=self.config,
734
+ past_key_values=past_key_values,
735
+ )
736
+
737
+ if (
738
+ self.config._attn_implementation == "sdpa"
739
+ and attention_mask is not None
740
+ and attention_mask.device.type in ["cuda", "xpu"]
741
+ and not output_attentions
742
+ ):
743
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
744
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
745
+ # Details: https://github.com/pytorch/pytorch/issues/110213
746
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
747
+
748
+ return causal_mask
749
+
750
+ @staticmethod
751
+ def _prepare_4d_causal_attention_mask_with_cache_position(
752
+ attention_mask: torch.Tensor,
753
+ sequence_length: int,
754
+ target_length: int,
755
+ dtype: torch.dtype,
756
+ device: torch.device,
757
+ cache_position: torch.Tensor,
758
+ batch_size: int,
759
+ config: Phi3Config,
760
+ past_key_values: Cache,
761
+ ):
762
+ """
763
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
764
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
765
+
766
+ Args:
767
+ attention_mask (`torch.Tensor`):
768
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
769
+ sequence_length (`int`):
770
+ The sequence length being processed.
771
+ target_length (`int`):
772
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
773
+ dtype (`torch.dtype`):
774
+ The dtype to use for the 4D attention mask.
775
+ device (`torch.device`):
776
+ The device to place the 4D attention mask on.
777
+ cache_position (`torch.Tensor`):
778
+ Indices depicting the position of the input sequence tokens in the sequence.
779
+ batch_size (`torch.Tensor`):
780
+ Batch size.
781
+ config (`Phi3Config`):
782
+ The model's configuration class
783
+ past_key_values (`Cache`):
784
+ The cache class that is being used currently to generate
785
+ """
786
+ if attention_mask is not None and attention_mask.dim() == 4:
787
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
788
+ causal_mask = attention_mask
789
+ else:
790
+ min_dtype = torch.finfo(dtype).min
791
+ causal_mask = torch.full(
792
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
793
+ )
794
+ diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
795
+ if config.sliding_window is not None:
796
+ # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
797
+ # the check is needed to verify whether the current checkpoint was trained with sliding window or not
798
+ if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
799
+ sliding_attend_mask = torch.arange(target_length, device=device) <= (
800
+ cache_position.reshape(-1, 1) - config.sliding_window
801
+ )
802
+ diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
803
+ causal_mask *= diagonal_attend_mask
804
+ causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
805
+ if attention_mask is not None:
806
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
807
+ if attention_mask.shape[-1] > target_length:
808
+ attention_mask = attention_mask[:, :target_length]
809
+ mask_length = attention_mask.shape[-1]
810
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
811
+ causal_mask.device
812
+ )
813
+ padding_mask = padding_mask == 0
814
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
815
+ padding_mask, min_dtype
816
+ )
817
+ return causal_mask
818
+
819
+
820
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...
821
+
822
+
823
+ class Phi3ForCausalLM(Phi3PreTrainedModel, GenerationMixin):
824
+ _tied_weights_keys = ["lm_head.weight"]
825
+ _tp_plan = {"lm_head": "colwise_rep"}
826
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
827
+
828
+ def __init__(self, config):
829
+ super().__init__(config)
830
+ self.model = Phi3Model(config)
831
+ self.vocab_size = config.vocab_size
832
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
833
+
834
+ # Initialize weights and apply final processing
835
+ self.post_init()
836
+
837
+ def get_input_embeddings(self):
838
+ return self.model.embed_tokens
839
+
840
+ def set_input_embeddings(self, value):
841
+ self.model.embed_tokens = value
842
+
843
+ def get_output_embeddings(self):
844
+ return self.lm_head
845
+
846
+ def set_output_embeddings(self, new_embeddings):
847
+ self.lm_head = new_embeddings
848
+
849
+ def set_decoder(self, decoder):
850
+ self.model = decoder
851
+
852
+ def get_decoder(self):
853
+ return self.model
854
+
855
+ @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
856
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
857
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
858
+ def forward(
859
+ self,
860
+ input_ids: torch.LongTensor = None,
861
+ attention_mask: Optional[torch.Tensor] = None,
862
+ position_ids: Optional[torch.LongTensor] = None,
863
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
864
+ inputs_embeds: Optional[torch.FloatTensor] = None,
865
+ labels: Optional[torch.LongTensor] = None,
866
+ use_cache: Optional[bool] = None,
867
+ output_attentions: Optional[bool] = None,
868
+ output_hidden_states: Optional[bool] = None,
869
+ return_dict: Optional[bool] = None,
870
+ cache_position: Optional[torch.LongTensor] = None,
871
+ logits_to_keep: Union[int, torch.Tensor] = 0,
872
+ **kwargs: Unpack[KwargsForCausalLM],
873
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
874
+ r"""
875
+ Args:
876
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
877
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
878
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
879
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
880
+
881
+ logits_to_keep (`int` or `torch.Tensor`, *optional*):
882
+ If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
883
+ `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
884
+ token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
885
+ If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
886
+ This is useful when using packed tensor format (single dimension for batch and sequence length).
887
+
888
+ Returns:
889
+
890
+ Example:
891
+
892
+ ```python
893
+ >>> from transformers import AutoTokenizer, Phi3ForCausalLM
894
+
895
+ >>> model = Phi3ForCausalLM.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
896
+ >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
897
+
898
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
899
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
900
+
901
+ >>> # Generate
902
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
903
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
904
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
905
+ ```"""
906
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
907
+ output_hidden_states = (
908
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
909
+ )
910
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
911
+
912
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
913
+ outputs = self.model(
914
+ input_ids=input_ids,
915
+ attention_mask=attention_mask,
916
+ position_ids=position_ids,
917
+ past_key_values=past_key_values,
918
+ inputs_embeds=inputs_embeds,
919
+ use_cache=use_cache,
920
+ output_attentions=output_attentions,
921
+ output_hidden_states=output_hidden_states,
922
+ return_dict=return_dict,
923
+ cache_position=cache_position,
924
+ **kwargs,
925
+ )
926
+
927
+ hidden_states = outputs[0]
928
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
929
+ slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
930
+ logits = self.lm_head(hidden_states[:, slice_indices, :])
931
+
932
+ loss = None
933
+ if labels is not None:
934
+ loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
935
+
936
+ if not return_dict:
937
+ output = (logits,) + outputs[1:]
938
+ return (loss,) + output if loss is not None else output
939
+
940
+ return CausalLMOutputWithPast(
941
+ loss=loss,
942
+ logits=logits,
943
+ past_key_values=outputs.past_key_values,
944
+ hidden_states=outputs.hidden_states,
945
+ attentions=outputs.attentions,
946
+ )
947
+
948
+ def prepare_inputs_for_generation(
949
+ self,
950
+ input_ids,
951
+ past_key_values=None,
952
+ attention_mask=None,
953
+ inputs_embeds=None,
954
+ cache_position=None,
955
+ position_ids=None,
956
+ use_cache=True,
957
+ logits_to_keep=None,
958
+ **kwargs,
959
+ ):
960
+ # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
961
+ # process
962
+
963
+ # The first time the input length reaches the long/short factor switching point, force the cache to be recomputed.
964
+ # This makes that single token position slower, but it is better than the current failure.
965
+ if (
966
+ past_key_values
967
+ and self.config.rope_scaling
968
+ and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
969
+ ):
970
+ past_length = cache_position[0]
971
+ if past_length <= self.config.original_max_position_embeddings:
972
+ past_key_values = None
973
+
974
+ model_inputs = super().prepare_inputs_for_generation(
975
+ input_ids=input_ids,
976
+ past_key_values=past_key_values,
977
+ attention_mask=attention_mask,
978
+ inputs_embeds=inputs_embeds,
979
+ cache_position=cache_position,
980
+ position_ids=position_ids,
981
+ use_cache=use_cache,
982
+ logits_to_keep=logits_to_keep,
983
+ **kwargs,
984
+ )
985
+ return model_inputs
986
+
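# Hedged reading of the check above: with longrope scaling and an original context of,
# for example, 4096 tokens, the first call whose total length exceeds that limit drops
# past_key_values, so the prompt is re-encoded with the long RoPE factors. That single
# step is slower, but the cached keys/values would otherwise still use the short factors.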
987
+
988
+ @add_start_docstrings(
989
+ """
990
+ The Phi3 Model transformer with a sequence classification head on top (linear layer).
991
+
992
+ [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
993
+ (e.g. GPT-2) do.
994
+
995
+ Since it does classification on the last token, it requires to know the position of the last token. If a
996
+ `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
997
+ no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
998
+ padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
999
+ each row of the batch).
1000
+ """,
1001
+ PHI3_START_DOCSTRING,
1002
+ )
1003
+ class Phi3ForSequenceClassification(Phi3PreTrainedModel):
1004
+ def __init__(self, config):
1005
+ super().__init__(config)
1006
+ self.num_labels = config.num_labels
1007
+ self.model = Phi3Model(config)
1008
+ self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
1009
+
1010
+ # Initialize weights and apply final processing
1011
+ self.post_init()
1012
+
1013
+ def get_input_embeddings(self):
1014
+ return self.model.embed_tokens
1015
+
1016
+ def set_input_embeddings(self, value):
1017
+ self.model.embed_tokens = value
1018
+
1019
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1020
+ def forward(
1021
+ self,
1022
+ input_ids: Optional[torch.LongTensor] = None,
1023
+ attention_mask: Optional[torch.Tensor] = None,
1024
+ position_ids: Optional[torch.LongTensor] = None,
1025
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1026
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1027
+ labels: Optional[torch.LongTensor] = None,
1028
+ use_cache: Optional[bool] = None,
1029
+ output_attentions: Optional[bool] = None,
1030
+ output_hidden_states: Optional[bool] = None,
1031
+ return_dict: Optional[bool] = None,
1032
+ ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
1033
+ r"""
1034
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1035
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1036
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1037
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1038
+ """
1039
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1040
+
1041
+ transformer_outputs = self.model(
1042
+ input_ids,
1043
+ attention_mask=attention_mask,
1044
+ position_ids=position_ids,
1045
+ past_key_values=past_key_values,
1046
+ inputs_embeds=inputs_embeds,
1047
+ use_cache=use_cache,
1048
+ output_attentions=output_attentions,
1049
+ output_hidden_states=output_hidden_states,
1050
+ return_dict=return_dict,
1051
+ )
1052
+ hidden_states = transformer_outputs[0]
1053
+ logits = self.score(hidden_states)
1054
+
1055
+ if input_ids is not None:
1056
+ batch_size = input_ids.shape[0]
1057
+ else:
1058
+ batch_size = inputs_embeds.shape[0]
1059
+
1060
+ if self.config.pad_token_id is None and batch_size != 1:
1061
+ raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
1062
+ if self.config.pad_token_id is None:
1063
+ last_non_pad_token = -1
1064
+ elif input_ids is not None:
1065
+ # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
1066
+ non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
1067
+ token_indices = torch.arange(input_ids.shape[-1], device=logits.device)
1068
+ last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
1069
+ else:
1070
+ last_non_pad_token = -1
1071
+ logger.warning_once(
1072
+ f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
1073
+ "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
1074
+ )
1075
+
1076
+ pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
1077
+
1078
+ loss = None
1079
+ if labels is not None:
1080
+ loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
1081
+
1082
+ if not return_dict:
1083
+ output = (pooled_logits,) + transformer_outputs[1:]
1084
+ return ((loss,) + output) if loss is not None else output
1085
+
1086
+ return SequenceClassifierOutputWithPast(
1087
+ loss=loss,
1088
+ logits=pooled_logits,
1089
+ past_key_values=transformer_outputs.past_key_values,
1090
+ hidden_states=transformer_outputs.hidden_states,
1091
+ attentions=transformer_outputs.attentions,
1092
+ )
1093
+
1094
+
1095
+ @add_start_docstrings(
1096
+ """
1097
+ The Phi3 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
1098
+ output) e.g. for Named-Entity-Recognition (NER) tasks.
1099
+ """,
1100
+ PHI3_START_DOCSTRING,
1101
+ )
1102
+ class Phi3ForTokenClassification(Phi3PreTrainedModel):
1103
+ def __init__(self, config):
1104
+ super().__init__(config)
1105
+ self.num_labels = config.num_labels
1106
+ self.model = Phi3Model(config)
1107
+ if getattr(config, "classifier_dropout", None) is not None:
1108
+ classifier_dropout = config.classifier_dropout
1109
+ elif getattr(config, "hidden_dropout", None) is not None:
1110
+ classifier_dropout = config.hidden_dropout
1111
+ else:
1112
+ classifier_dropout = 0.1
1113
+ self.dropout = nn.Dropout(classifier_dropout)
1114
+ self.score = nn.Linear(config.hidden_size, config.num_labels)
1115
+
1116
+ # Initialize weights and apply final processing
1117
+ self.post_init()
1118
+
1119
+ def get_input_embeddings(self):
1120
+ return self.model.embed_tokens
1121
+
1122
+ def set_input_embeddings(self, value):
1123
+ self.model.embed_tokens = value
1124
+
1125
+ @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
1126
+ @add_code_sample_docstrings(
1127
+ checkpoint=_CHECKPOINT_FOR_DOC,
1128
+ output_type=TokenClassifierOutput,
1129
+ config_class=_CONFIG_FOR_DOC,
1130
+ )
1131
+ def forward(
1132
+ self,
1133
+ input_ids: Optional[torch.LongTensor] = None,
1134
+ attention_mask: Optional[torch.Tensor] = None,
1135
+ position_ids: Optional[torch.LongTensor] = None,
1136
+ past_key_values: Optional[List[torch.FloatTensor]] = None,
1137
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1138
+ labels: Optional[torch.LongTensor] = None,
1139
+ use_cache: Optional[bool] = None,
1140
+ output_attentions: Optional[bool] = None,
1141
+ output_hidden_states: Optional[bool] = None,
1142
+ return_dict: Optional[bool] = None,
1143
+ ) -> Union[Tuple, TokenClassifierOutput]:
1144
+ r"""
1145
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1146
+ Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
1147
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1148
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1149
+ """
1150
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1151
+
1152
+ outputs = self.model(
1153
+ input_ids,
1154
+ attention_mask=attention_mask,
1155
+ position_ids=position_ids,
1156
+ past_key_values=past_key_values,
1157
+ inputs_embeds=inputs_embeds,
1158
+ use_cache=use_cache,
1159
+ output_attentions=output_attentions,
1160
+ output_hidden_states=output_hidden_states,
1161
+ return_dict=return_dict,
1162
+ )
1163
+ sequence_output = outputs[0]
1164
+ sequence_output = self.dropout(sequence_output)
1165
+ logits = self.score(sequence_output)
1166
+
1167
+ loss = None
1168
+ if labels is not None:
1169
+ loss = self.loss_function(logits, labels, self.config)
1170
+
1171
+ if not return_dict:
1172
+ output = (logits,) + outputs[2:]
1173
+ return ((loss,) + output) if loss is not None else output
1174
+
1175
+ return TokenClassifierOutput(
1176
+ loss=loss,
1177
+ logits=logits,
1178
+ hidden_states=outputs.hidden_states,
1179
+ attentions=outputs.attentions,
1180
+ )
sample_finetune.py ADDED
@@ -0,0 +1,214 @@
1
+ import sys
2
+ import logging
3
+
4
+ import datasets
5
+ from datasets import load_dataset
6
+ from peft import LoraConfig
7
+ import torch
8
+ import transformers
9
+ from trl import SFTTrainer
10
+ from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig
11
+
12
+ """
13
+ A simple example of using SFTTrainer and Accelerate to fine-tune the Phi-4-Mini-Instruct model. For
14
+ a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py.
15
+ This example uses DeepSpeed ZeRO3 offload to reduce memory usage. The
16
+ script can be run on V100 or later generation GPUs. Here are some suggestions on
17
+ further reducing memory consumption:
18
+ - reduce batch size
19
+ - decrease lora dimension
20
+ - restrict lora target modules
21
+ Please follow these steps to run the script:
22
+ 1. Install dependencies:
23
+ conda install -c conda-forge accelerate=1.3.0
24
+ pip3 install -i https://pypi.org/simple/ bitsandbytes
25
+ pip3 install peft==0.14.0
26
+ pip3 install transformers==4.48.1
27
+ pip3 install trl datasets
28
+ pip3 install deepspeed
29
+ 2. Setup accelerate and deepspeed config based on the machine used:
30
+ accelerate config
31
+ Here is a sample config for deepspeed zero3:
32
+ compute_environment: LOCAL_MACHINE
33
+ debug: false
34
+ deepspeed_config:
35
+ gradient_accumulation_steps: 1
36
+ offload_optimizer_device: none
37
+ offload_param_device: none
38
+ zero3_init_flag: true
39
+ zero3_save_16bit_model: true
40
+ zero_stage: 3
41
+ distributed_type: DEEPSPEED
42
+ downcast_bf16: 'no'
43
+ enable_cpu_affinity: false
44
+ machine_rank: 0
45
+ main_training_function: main
46
+ mixed_precision: bf16
47
+ num_machines: 1
48
+ num_processes: 4
49
+ rdzv_backend: static
50
+ same_network: true
51
+ tpu_env: []
52
+ tpu_use_cluster: false
53
+ tpu_use_sudo: false
54
+ use_cpu: false
55
+ 3. check accelerate config:
56
+ accelerate env
57
+ 4. Run the code:
58
+ accelerate launch sample_finetune.py
59
+ """
+
+ logger = logging.getLogger(__name__)
+
+
+ ###################
+ # Hyper-parameters
+ ###################
+ training_config = {
+     "bf16": True,
+     "do_eval": False,
+     "learning_rate": 5.0e-06,
+     "log_level": "info",
+     "logging_steps": 20,
+     "logging_strategy": "steps",
+     "lr_scheduler_type": "cosine",
+     "num_train_epochs": 1,
+     "max_steps": -1,
+     "output_dir": "./checkpoint_dir",
+     "overwrite_output_dir": True,
+     "per_device_eval_batch_size": 4,
+     "per_device_train_batch_size": 4,
+     "remove_unused_columns": True,
+     "save_steps": 100,
+     "save_total_limit": 1,
+     "seed": 0,
+     "gradient_checkpointing": True,
+     "gradient_checkpointing_kwargs": {"use_reentrant": False},
+     "gradient_accumulation_steps": 1,
+     "warmup_ratio": 0.2,
+ }
+
+ peft_config = {
+     "r": 16,
+     "lora_alpha": 32,
+     "lora_dropout": 0.05,
+     "bias": "none",
+     "task_type": "CAUSAL_LM",
+     "target_modules": "all-linear",
+     "modules_to_save": None,
+ }
+ train_conf = TrainingArguments(**training_config)
+ peft_conf = LoraConfig(**peft_config)
+
+
+ ###############
+ # Setup logging
+ ###############
+ logging.basicConfig(
+     format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+     datefmt="%Y-%m-%d %H:%M:%S",
+     handlers=[logging.StreamHandler(sys.stdout)],
+ )
+ log_level = train_conf.get_process_log_level()
+ logger.setLevel(log_level)
+ datasets.utils.logging.set_verbosity(log_level)
+ transformers.utils.logging.set_verbosity(log_level)
+ transformers.utils.logging.enable_default_handler()
+ transformers.utils.logging.enable_explicit_format()
+
+ # Log on each process a small summary
+ logger.warning(
+     f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}"
+     + f" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}"
+ )
+ logger.info(f"Training/evaluation parameters {train_conf}")
+ logger.info(f"PEFT parameters {peft_conf}")
+
+
+ ################
+ # Model Loading
+ ################
+ checkpoint_path = "microsoft/Phi-4-mini-instruct"
+ model_kwargs = dict(
+     use_cache=False,
+     trust_remote_code=True,
+     attn_implementation="flash_attention_2",  # load the model with flash-attention support
+     torch_dtype=torch.bfloat16,
+     device_map=None,
+ )
+ model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)
+ tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
+ tokenizer.model_max_length = 2048
+ tokenizer.pad_token = tokenizer.unk_token  # use unk rather than eos token to prevent endless generation
+ tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
+ tokenizer.padding_side = 'right'
+
+
+ ##################
+ # Data Processing
+ ##################
+ def apply_chat_template(
+     example,
+     tokenizer,
+ ):
+     messages = example["messages"]
+     example["text"] = tokenizer.apply_chat_template(
+         messages, tokenize=False, add_generation_prompt=False)
+     return example
+
+
+ train_dataset, test_dataset = load_dataset("HuggingFaceH4/ultrachat_200k", split=["train_sft", "test_sft"])
+ column_names = list(train_dataset.features)
+
+ processed_train_dataset = train_dataset.map(
+     apply_chat_template,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+     desc="Applying chat template to train_sft",
+ )
+
+ processed_test_dataset = test_dataset.map(
+     apply_chat_template,
+     fn_kwargs={"tokenizer": tokenizer},
+     num_proc=10,
+     remove_columns=column_names,
+     desc="Applying chat template to test_sft",
+ )
+
+
+ ###########
+ # Training
+ ###########
+ trainer = SFTTrainer(
+     model=model,
+     args=train_conf,
+     peft_config=peft_conf,
+     train_dataset=processed_train_dataset,
+     eval_dataset=processed_test_dataset,
+     max_seq_length=2048,
+     dataset_text_field="text",
+     tokenizer=tokenizer,
+     packing=True
+ )
+ train_result = trainer.train()
+ metrics = train_result.metrics
+ trainer.log_metrics("train", metrics)
+ trainer.save_metrics("train", metrics)
+ trainer.save_state()
+
+
+ #############
+ # Evaluation
+ #############
+ tokenizer.padding_side = 'left'
+ metrics = trainer.evaluate()
+ metrics["eval_samples"] = len(processed_test_dataset)
+ trainer.log_metrics("eval", metrics)
+ trainer.save_metrics("eval", metrics)
+
+
+ ############
+ # Save model
+ ############
+ trainer.save_model(train_conf.output_dir)
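
With `peft_config` set, `trainer.save_model` above writes the LoRA adapter (not merged weights) to `output_dir`. A minimal sketch of reloading that adapter for inference, assuming the run above completed and `./checkpoint_dir` exists as configured:

```python
# Minimal sketch: load the LoRA adapter saved by the training run above.
import torch
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_dir = "./checkpoint_dir"  # output_dir from training_config above
model = AutoPeftModelForCausalLM.from_pretrained(adapter_dir, torch_dtype=torch.bfloat16)
model = model.merge_and_unload()  # optionally fold the adapter into the base weights
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")

messages = [{"role": "user", "content": "Give me a short proverb about patience."}]
input_ids = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
)
output_ids = model.generate(input_ids, max_new_tokens=64)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```

Merging the adapter is optional, but it makes the result easier to re-quantize or convert (for example with mlx-lm) afterwards.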
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
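
Note that `bos`, `eos`, `pad`, and `unk` above all resolve to the same `<|endoftext|>` token (id 199999 per `config.json`). A quick, purely illustrative check:

```python
# Illustrative only: confirm the special-token mapping declared above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("raufrajar/Phi-4-mini-instruct-8bit")
print(tok.bos_token, tok.eos_token, tok.pad_token, tok.unk_token)  # all "<|endoftext|>"
print(tok.eos_token_id)  # 199999
```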
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:382cc235b56c725945e149cc25f191da667c836655efd0857b004320e90e91ea
+ size 15524095
tokenizer_config.json ADDED
@@ -0,0 +1,112 @@
+ {
+   "add_bos_token": false,
+   "add_eos_token": false,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "199999": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200018": {
+       "content": "<|endofprompt|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "200019": {
+       "content": "<|assistant|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "200020": {
+       "content": "<|end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "200021": {
+       "content": "<|user|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "200022": {
+       "content": "<|system|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     },
+     "200023": {
+       "content": "<|tool|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "200024": {
+       "content": "<|/tool|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "200025": {
+       "content": "<|tool_call|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "200026": {
+       "content": "<|/tool_call|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "200027": {
+       "content": "<|tool_response|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": false
+     },
+     "200028": {
+       "content": "<|tag|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": true,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "<|endoftext|>",
+   "chat_template": "{% for message in messages %}{% if message['role'] == 'system' and 'tools' in message and message['tools'] is not none %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|tool|>' + message['tools'] + '<|/tool|>' + '<|end|>' }}{% else %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|end|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>' }}{% else %}{{ eos_token }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|endoftext|>",
+   "extra_special_tokens": {},
+   "model_max_length": 131072,
+   "pad_token": "<|endoftext|>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "<|endoftext|>"
+ }
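
The `chat_template` above renders each message as `<|role|>content<|end|>` (system messages that carry a `tools` field additionally get a `<|tool|>…<|/tool|>` block) and appends `<|assistant|>` when a generation prompt is requested. A small illustration with placeholder messages:

```python
# Illustrates how the chat_template above formats a conversation into a prompt string.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("raufrajar/Phi-4-mini-instruct-8bit")
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is the capital of France?"},
]
print(tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True))
# <|system|>You are a helpful assistant.<|end|><|user|>What is the capital of France?<|end|><|assistant|>
```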
vocab.json ADDED
The diff for this file is too large to render. See raw diff