Added model files
- .gitattributes +1 -0
- CODE_OF_CONDUCT.md +9 -0
- LICENSE +22 -0
- NOTICE.md +38 -0
- README.md +345 -0
- SECURITY.md +41 -0
- added_tokens.json +12 -0
- config.json +144 -0
- configuration_phi3.py +226 -0
- generation_config.json +10 -0
- merges.txt +0 -0
- model-00001-of-00002.safetensors +3 -0
- model-00002-of-00002.safetensors +3 -0
- model.safetensors.index.json +201 -0
- modeling_phi3.py +1180 -0
- sample_finetune.py +214 -0
- special_tokens_map.json +30 -0
- tokenizer.json +3 -0
- tokenizer_config.json +111 -0
- vocab.json +0 -0
.gitattributes
CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
CODE_OF_CONDUCT.md
ADDED
@@ -0,0 +1,9 @@
# Microsoft Open Source Code of Conduct

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).

Resources:

- [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/)
- [Microsoft Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/)
- Contact [[email protected]](mailto:[email protected]) with questions or concerns
LICENSE
ADDED
@@ -0,0 +1,22 @@
Microsoft.
Copyright (c) Microsoft Corporation.

MIT License

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
NOTICE.md
ADDED
@@ -0,0 +1,38 @@
NOTICES AND INFORMATION
Do Not Translate or Localize

This software incorporates material from third parties.

**Component.** https://github.com/Dao-AILab/flash-attention

**Open Source License/Copyright Notice.**

BSD 3-Clause License

Copyright (c) 2022, the respective contributors, as shown by the AUTHORS file.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

* Neither the name of the copyright holder nor the names of its
  contributors may be used to endorse or promote products derived from
  this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
README.md
ADDED
@@ -0,0 +1,345 @@
---
license: mit
license_link: https://huggingface.co/microsoft/Phi-4-mini-instruct/resolve/main/LICENSE
language:
- multilingual
pipeline_tag: text-generation
tags:
- nlp
- code
widget:
- messages:
  - role: user
    content: Can you provide ways to eat combinations of bananas and dragonfruits?
library_name: transformers
---

## Model Summary

Phi-4-mini-instruct is a lightweight open model built upon synthetic data and filtered publicly available websites, with a focus on high-quality, reasoning-dense data. The model belongs to the Phi-4 model family and supports a 128K token context length. The model underwent an enhancement process, incorporating both supervised fine-tuning and direct preference optimization to support precise instruction adherence and robust safety measures.

🏡 [Phi-4 Portal]() <br>
📰 [Phi-4 Microsoft Blog]() <br>
📖 [Phi-4 Technical Report]() <br>
👩‍🍳 [Phi-4 Cookbook]() <br>
🖥️ [Try It]() <br>

**Phi-4**: [[mini-instruct](https://huggingface.co/microsoft/Phi-4-mini-instruct) | [onnx]()]; [[multimodal-instruct]()]

## Intended Uses

### Primary Use Cases

The model is intended for broad multilingual commercial and research use. It is suited for general-purpose AI systems and applications which require:

1) Memory/compute constrained environments
2) Latency bound scenarios
3) Strong reasoning (especially math and logic).

The model is designed to accelerate research on language and multimodal models, and to serve as a building block for generative AI-powered features.

### Use Case Considerations

The model is not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models, as well as performance differences across languages, as they select use cases, and should evaluate and mitigate for accuracy, safety, and fairness before using the model within a specific downstream use case, particularly for high-risk scenarios.
Developers should be aware of and adhere to applicable laws or regulations (including but not limited to privacy and trade compliance laws) that are relevant to their use case.

***Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under.***

## Release Notes

This release of Phi-4-mini-instruct is based on valuable user feedback from the Phi-3 series. The Phi-4-mini model employs a new architecture for efficiency, a larger vocabulary for multilingual support, and better post-training techniques for instruction following and function calling, along with additional data, leading to substantial gains on key capabilities. It is anticipated that most use cases will benefit from this release, but users are encouraged to test the model in their particular AI applications. The enthusiastic support for the Phi-4 series is greatly appreciated. Feedback on Phi-4-mini-instruct is welcomed and crucial to the model's evolution and improvement.

### Model Quality

To understand the capabilities, the 3.8B-parameter Phi-4-mini-instruct model was compared with a set of models over a variety of benchmarks using an internal benchmark platform (see Appendix A for benchmark methodology). A high-level overview of the model quality is as follows:

| Benchmark | Similar size | | | | |2x size | | | | | |
|----------------------------------|-------------|-------------------|-------------------|-------------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|-----------------|
| | Phi-4 mini-Ins | Phi-3.5-mini-Ins | Llama-3.2-3B-Ins | Mistral-3B | Qwen2.5-3B-Ins | Qwen2.5-7B-Ins | Mistral-8B-2410 | Llama-3.1-8B-Ins | Llama-3.1-Tulu-3-8B | Gemma-2.9B-Ins | GPT-4o-mini-2024-07-18 |
| **Popular aggregated benchmark** | | | | | | | | | | | |
| Arena Hard | 32.8 | 34.4 | 17.0 | 26.9 | 32.0 | 55.5 | 37.3 | 25.7 | 42.7 | 43.7 | 53.7 |
| BigBench Hard (0-shot, CoT) | 70.4 | 63.1 | 55.4 | 51.2 | 56.2 | 72.4 | 53.3 | 63.4 | 55.5 | 65.7 | 80.4 |
| MMLU (5-shot) | 67.3 | 65.5 | 61.8 | 60.8 | 65.0 | 72.6 | 63.0 | 68.1 | 65.0 | 71.3 | 77.2 |
| MMLU-Pro (0-shot, CoT) | 52.8 | 47.4 | 39.2 | 35.3 | 44.7 | 56.2 | 36.6 | 44.0 | 40.9 | 50.1 | 62.8 |
| **Reasoning** | | | | | | | | | | | |
| ARC Challenge (10-shot) | 83.7 | 84.6 | 76.1 | 80.3 | 82.6 | 90.1 | 82.7 | 83.1 | 79.4 | 89.8 | 93.5 |
| BoolQ (2-shot) | 81.2 | 77.7 | 71.4 | 79.4 | 65.4 | 80.0 | 80.5 | 82.8 | 79.3 | 85.7 | 88.7 |
| GPQA (0-shot, CoT) | 25.2 | 26.6 | 24.3 | 24.4 | 23.4 | 30.6 | 26.3 | 26.3 | 29.9 | 39.1 | 41.1 |
| HellaSwag (5-shot) | 69.1 | 72.2 | 77.2 | 74.6 | 74.6 | 80.0 | 73.5 | 72.8 | 80.9 | 87.1 | 88.7 |
| OpenBookQA (10-shot) | 79.2 | 81.2 | 72.6 | 79.8 | 79.3 | 82.6 | 80.2 | 84.8 | 79.8 | 90.0 | 90.0 |
| PIQA (5-shot) | 77.6 | 78.2 | 68.2 | 73.2 | 72.6 | 76.2 | 81.2 | 83.2 | 78.3 | 83.7 | 88.7 |
| Social IQA (5-shot) | 72.5 | 75.1 | 68.3 | 73.9 | 75.3 | 75.3 | 77.6 | 71.8 | 73.4 | 74.7 | 82.9 |
| TruthfulQA (MC2) (10-shot) | 66.4 | 65.2 | 59.2 | 62.9 | 64.3 | 69.4 | 63.0 | 69.2 | 64.1 | 76.6 | 78.2 |
| Winogrande (5-shot) | 67.0 | 72.2 | 53.2 | 59.8 | 63.3 | 71.1 | 63.1 | 64.7 | 65.4 | 74.0 | 76.9 |
| **Multilingual** | | | | | | | | | | | |
| Multilingual MMLU (5-shot) | 49.3 | 51.8 | 48.1 | 46.4 | 55.9 | 64.4 | 53.7 | 56.2 | 54.5 | 63.8 | 72.9 |
| MGSM (0-shot, CoT) | 63.9 | 49.6 | 44.6 | 44.6 | 53.5 | 64.5 | 56.7 | 56.7 | 58.6 | 75.1 | 81.7 |
| **Math** | | | | | | | | | | | |
| GSM8K (8-shot, CoT) | 88.6 | 76.9 | 75.6 | 80.1 | 80.6 | 88.7 | 81.9 | 82.4 | 84.3 | 84.9 | 91.3 |
| MATH (0-shot, CoT) | 64.0 | 49.8 | 46.7 | 41.8 | 61.7 | 60.4 | 41.6 | 47.6 | 46.1 | 51.3 | 70.2 |
| **Overall** | **63.5** | **60.5** | **56.2** | **56.9** | **60.1** | **67.9** | **60.2** | **62.3** | **60.9** | **65.0** | **75.5** |

Overall, the model, with only 3.8B parameters, achieves a level of multilingual language understanding and reasoning ability similar to much larger models. However, it is still fundamentally limited by its size for certain tasks. The model simply does not have the capacity to store extensive factual knowledge; therefore, users may experience factual inaccuracies. However, it may be possible to resolve this weakness by augmenting Phi-4 with a search engine, particularly when using the model under RAG settings.

## Usage

### Tokenizer

Phi-4-mini-instruct supports a vocabulary size of up to `200064` tokens. The [tokenizer files](https://huggingface.co/microsoft/Phi-4-mini-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size.

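As a quick illustration (a minimal sketch, assuming the `transformers` setup described later in this card), the snippet below loads the tokenizer, checks how many tokens it currently defines, registers an extra special token for downstream fine-tuning, and resizes the embeddings accordingly. The added token name is purely hypothetical.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")
print(len(tokenizer))  # number of tokens currently defined by the tokenizer files

# Hypothetical example: register one additional special token for fine-tuning.
tokenizer.add_special_tokens({"additional_special_tokens": ["<|my_custom_tag|>"]})

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-4-mini-instruct", torch_dtype="auto", trust_remote_code=True
)
# Keep the embedding matrix consistent with the extended tokenizer,
# staying within the model's vocab_size of 200064.
model.resize_token_embeddings(len(tokenizer))
```
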
### Input Formats

Given the nature of the training data, the Phi-4-mini-instruct model is best suited for prompts using specific formats. Below are the two primary formats:

#### Chat format

This format is used for general conversation and instructions:

```yaml
<|system|>Insert System Message<|end|><|user|>Insert User Message<|end|><|assistant|>
```

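This is the layout the tokenizer's chat template produces. As a minimal sketch (assuming the `transformers` requirements listed later in this card), `apply_chat_template` renders a system/user message pair into that format:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")

messages = [
    {"role": "system", "content": "Insert System Message"},
    {"role": "user", "content": "Insert User Message"},
]

# add_generation_prompt=True appends the trailing <|assistant|> marker for generation.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Expected shape (per the chat format above):
# <|system|>Insert System Message<|end|><|user|>Insert User Message<|end|><|assistant|>
```
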
#### Tool-enabled function-calling format

This format is used when the user wants the model to provide function calls based on the given tools. The user should provide the available tools in the system prompt, wrapped by `<|tool|>` and `<|/tool|>` tokens. The tools should be specified in JSON format, using a JSON dump structure. Example:

```
<|system|>You are a helpful assistant with some tools.<|tool|>[{"name": "get_weather_updates", "description": "Fetches weather updates for a given city using the RapidAPI Weather API.", "parameters": {"city": {"description": "The name of the city for which to retrieve weather information.", "type": "str", "default": "London"}}}]<|/tool|><|end|><|user|>What is the weather like in Paris today?<|end|><|assistant|>
```

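As a minimal sketch of how such a prompt can be assembled programmatically, the tool definition below is the same illustrative `get_weather_updates` example from above, and `json.dumps` produces the required JSON dump structure:

```python
import json

tools = [
    {
        "name": "get_weather_updates",
        "description": "Fetches weather updates for a given city using the RapidAPI Weather API.",
        "parameters": {
            "city": {
                "description": "The name of the city for which to retrieve weather information.",
                "type": "str",
                "default": "London",
            }
        },
    }
]

system_message = "You are a helpful assistant with some tools."
user_message = "What is the weather like in Paris today?"

# Wrap the JSON-dumped tool list in <|tool|> ... <|/tool|> inside the system turn.
prompt = (
    f"<|system|>{system_message}<|tool|>{json.dumps(tools)}<|/tool|><|end|>"
    f"<|user|>{user_message}<|end|><|assistant|>"
)
print(prompt)
```
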
### Inference with vLLM

#### Requirements

List of required packages:

```
flash_attn==2.7.4.post1
torch==2.6.0
vllm>=0.7.2
```

#### Example

To perform inference using vLLM, you can use the following code snippet:

```python
from vllm import LLM, SamplingParams

llm = LLM(model="microsoft/Phi-4-mini-instruct", trust_remote_code=True)

messages = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
    {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."},
    {"role": "user", "content": "What about solving the equation 2x + 3 = 7?"},
]

sampling_params = SamplingParams(
    max_tokens=500,
    temperature=0.0,
)

output = llm.chat(messages=messages, sampling_params=sampling_params)
print(output[0].outputs[0].text)
```

### Inference with Transformers

#### Requirements

The Phi-4 family has been integrated into `transformers` version `4.49.0`. The currently installed `transformers` version can be verified with: `pip list | grep transformers`.

List of required packages:

```
flash_attn==2.7.4.post1
torch==2.6.0
transformers==4.49.0
accelerate==1.3.0
```

Phi-4-mini-instruct is also available in [Azure AI Studio]().

#### Example

After obtaining the Phi-4-mini-instruct model checkpoints, users can use this sample code for inference.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

torch.random.manual_seed(0)

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-4-mini-instruct",
    device_map="cuda",
    torch_dtype="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")

messages = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"},
    {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."},
    {"role": "user", "content": "What about solving the equation 2x + 3 = 7?"},
]

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)

generation_args = {
    "max_new_tokens": 500,
    "return_full_text": False,
    "temperature": 0.0,
    "do_sample": False,
}

output = pipe(messages, **generation_args)
print(output[0]['generated_text'])
```

## Responsible AI Considerations

Like other language models, the Phi family of models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:

+ Quality of Service: The Phi models are trained primarily on English text and some additional multilingual text. Languages other than English will experience worse performance, with performance disparities across non-English languages. English language varieties with less representation in the training data might experience worse performance than standard American English.
+ Multilingual performance and safety gaps: We believe it is important to make language models more widely available across different languages, but the Phi 4 models still exhibit challenges common across multilingual releases. As with any deployment of LLMs, developers will be better positioned to test for performance or safety gaps for their linguistic and cultural context and customize the model with additional fine-tuning and appropriate safeguards.
+ Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups, cultural contexts, or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases.
+ Inappropriate or Offensive Content: These models may produce other types of inappropriate or offensive content, which may make them inappropriate to deploy in sensitive contexts without additional mitigations specific to the use case.
+ Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated.
+ Limited Scope for Code: The majority of Phi 4 training data is based in Python and uses common packages such as `typing`, `math`, `random`, `collections`, `datetime`, and `itertools`. If the model generates Python scripts that utilize other packages or scripts in other languages, it is strongly recommended that users manually verify all API uses.
+ Long Conversation: Phi 4 models, like other models, can in some cases generate responses that are repetitive, unhelpful, or inconsistent in very long chat sessions in both English and non-English languages. Developers are encouraged to place appropriate mitigations, such as limiting conversation turns, to account for possible conversational drift.

Developers should apply responsible AI best practices, including mapping, measuring, and mitigating risks associated with their specific use case and cultural and linguistic context. The Phi 4 family of models are general-purpose models. As developers plan to deploy these models for specific use cases, they are encouraged to fine-tune the models for their use case and leverage the models as part of broader AI systems with language-specific safeguards in place. Important areas for consideration include:

+ Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.
+ High-Risk Scenarios: Developers should assess the suitability of using models in high-risk scenarios where unfair, unreliable, or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context.
+ Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG).
+ Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case.
+ Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.


## Training

### Model

+ **Architecture:** Phi-4-mini-instruct has 3.8B parameters and is a dense decoder-only Transformer model. When compared with Phi-3.5-mini, the major changes with Phi-4-mini-instruct are the 200K vocabulary, grouped-query attention, and shared input and output embeddings (see the configuration sketch after this list).<br>
+ **Inputs:** Text. It is best suited for prompts using the chat format.<br>
+ **Context length:** 128K tokens<br>
+ **GPUs:** 512 A100-80G<br>
+ **Training time:** 21 days<br>
+ **Training data:** 5T tokens<br>
+ **Outputs:** Generated text in response to the input<br>
+ **Dates:** Trained between November and December 2024<br>
+ **Status:** This is a static model trained on offline datasets with a cutoff date of June 2024 for publicly available data.<br>
+ **Supported languages:** Arabic, Chinese, Czech, Danish, Dutch, English, Finnish, French, German, Hebrew, Hungarian, Italian, Japanese, Korean, Norwegian, Polish, Portuguese, Russian, Spanish, Swedish, Thai, Turkish, Ukrainian<br>
+ **Release date:** February 2025<br>

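As a minimal sketch of how these architectural choices show up in the shipped configuration (the values are taken from the `config.json` added in this commit, and loading it assumes the `transformers` setup above):

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("microsoft/Phi-4-mini-instruct", trust_remote_code=True)

print(config.vocab_size)               # 200064 -> the ~200K vocabulary
print(config.num_attention_heads)      # 24 query heads ...
print(config.num_key_value_heads)      # ... sharing 8 key/value heads (grouped-query attention)
print(config.tie_word_embeddings)      # True -> shared input and output embeddings
print(config.max_position_embeddings)  # 131072 -> 128K token context length
```
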
### Training Datasets

Phi-4-mini's training data includes a wide variety of sources, totaling 5 trillion tokens, and is a combination of
1) publicly available documents filtered for quality, selected high-quality educational data, and code;
2) newly created synthetic, "textbook-like" data for the purpose of teaching math, coding, common sense reasoning, and general knowledge of the world (e.g., science, daily activities, theory of mind, etc.);
3) high-quality chat-format supervised data covering various topics to reflect human preferences on different aspects such as instruction-following, truthfulness, honesty, and helpfulness. Focus was placed on the quality of data that could potentially improve the reasoning ability of the model, and the publicly available documents were filtered to contain a preferred level of knowledge. As an example, the result of a game in the Premier League on a particular day might be good training data for frontier models, but such information was removed to leave more model capacity for reasoning, given the model's small size. More details about data can be found in the Phi-4-mini-instruct technical report.

The decontamination process involved normalizing and tokenizing the dataset, then generating and comparing n-grams between the target dataset and benchmark datasets. Samples with matching n-grams above a threshold were flagged as contaminated and removed from the dataset. A detailed contamination report was generated, summarizing the matched text, matching ratio, and filtered results for further analysis; a simplified sketch of this n-gram check is shown below.

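The following is an illustrative sketch of that n-gram overlap check only, not the actual decontamination pipeline; whitespace tokenization, n = 8, and the 0.3 threshold are all assumptions chosen for illustration.

```python
def ngrams(text, n=8):
    """Normalize (lowercase, whitespace-split) and collect word n-grams."""
    tokens = text.lower().split()
    return {tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)}


def is_contaminated(sample, benchmark_texts, n=8, threshold=0.3):
    """Flag a training sample whose n-gram overlap with any benchmark text exceeds the threshold."""
    sample_grams = ngrams(sample, n)
    if not sample_grams:
        return False
    return any(
        len(sample_grams & ngrams(bench, n)) / len(sample_grams) > threshold
        for bench in benchmark_texts
    )


# Toy usage: keep only training samples that do not overlap the benchmark suite.
benchmark_texts = ["a benchmark question repeated here word for word as an example of overlap"]
training_samples = [
    "an ordinary web document about cooking bananas and dragonfruits together",
    "a benchmark question repeated here word for word as an example of overlap",
]
clean = [s for s in training_samples if not is_contaminated(s, benchmark_texts)]
print(len(clean))  # 1: the overlapping sample is removed
```
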
### Fine-tuning

A basic example of multi-GPU supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-4-mini-instruct/resolve/main/sample_finetune.py).

## Safety Evaluation and Red-Teaming

Various evaluation techniques, including red teaming, adversarial conversation simulations, and multilingual safety evaluation benchmark datasets, were leveraged to evaluate Phi-4 models' propensity to produce undesirable outputs across multiple languages and risk categories. Several approaches were used to compensate for the limitations of any one approach alone. Findings across the various evaluation methods indicate that safety post-training, done as detailed in the Phi 3 Safety Post-Training paper, had a positive impact across multiple languages and risk categories, as observed by refusal rates (refusal to output undesirable content) and robustness to jailbreak techniques. Details on prior red team evaluations across Phi models can be found in the Phi 3 Safety Post-Training paper. For this release, the red team tested the model in English, Chinese, Japanese, Spanish, Portuguese, Arabic, Thai, and Russian for the following potential harms: Hate Speech and Bias, Violent Crimes, Specialized Advice, and Election Information. Their findings indicate that the model is resistant to jailbreak techniques across languages, but that language-specific attack prompts leveraging cultural context can cause the model to output harmful content. Another insight was that, in function calling scenarios, the model could sometimes hallucinate function names or URLs. The model may also be more susceptible to longer multi-turn jailbreak techniques across both English and non-English languages. These findings highlight the need for industry-wide investment in the development of high-quality safety evaluation datasets across multiple languages, including low-resource languages, and risk areas that account for cultural nuances where those languages are spoken.

## Software
* [PyTorch](https://github.com/pytorch/pytorch)
* [Transformers](https://github.com/huggingface/transformers)
* [Flash-Attention](https://github.com/HazyResearch/flash-attention)

## Hardware
Note that by default, the Phi-4-mini-instruct model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types:
* NVIDIA A100
* NVIDIA A6000
* NVIDIA H100

If you want to run the model on:
* NVIDIA V100 or earlier generation GPUs: call `AutoModelForCausalLM.from_pretrained()` with `attn_implementation="eager"` (see the sketch after this list)

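A minimal sketch of that eager-attention fallback, assuming the same `transformers` setup as the earlier example; only `attn_implementation` differs from the default flash-attention load:

```python
from transformers import AutoModelForCausalLM

# Fallback for GPUs without flash-attention support (e.g. V100 or earlier).
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-4-mini-instruct",
    device_map="cuda",
    torch_dtype="auto",
    trust_remote_code=True,
    attn_implementation="eager",
)
```
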
## License
The model is licensed under the [MIT license](./LICENSE).

## Trademarks
This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft's Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos is subject to those third parties' policies.


## Appendix A: Benchmark Methodology

We include a brief word on methodology here, in particular how we think about optimizing prompts.
In an ideal world, we would never change any prompts in our benchmarks, to ensure an apples-to-apples comparison when comparing different models. Indeed, this is our default approach, and is the case in the vast majority of models we have run to date.
There are, however, some exceptions to this. In some cases, we see a model that performs worse than expected on a given eval due to a failure to respect the output format. For example:

+ A model may refuse to answer questions (for no apparent reason), or in coding tasks models may prefix their response with "Sure, I can help with that. ...", which may break the parser. In such cases, we have opted to try different system messages (e.g. "You must always respond to a question" or "Get to the point!").
+ With some models, we observed that few-shot examples actually hurt model performance. In such cases we allowed running the benchmarks with 0-shot prompting for all models.
+ We have tools to convert between chat and completions APIs. When converting a chat prompt to a completion prompt, some models have different keywords, e.g. Human vs. User. In these cases, we do allow for model-specific mappings for chat-to-completion prompts.

However, we do not:

+ Pick different few-shot examples. Few shots will always be the same when comparing different models.
+ Change the prompt format: e.g. if it is an A/B/C/D multiple choice, we do not tweak it to 1/2/3/4 multiple choice.

### Benchmark datasets

The model was evaluated across a breadth of public and internal benchmarks to understand its capabilities under multiple tasks and conditions. While most evaluations use English, leading multilingual benchmarks were incorporated to cover performance in select languages. More specifically,

+ Reasoning:
  + Winogrande: commonsense reasoning around pronoun resolution
  + PIQA: physical commonsense reasoning around everyday situations
  + ARC-challenge: grade-school multiple choice science questions
  + GPQA: very hard questions written and validated by experts in biology, physics, and chemistry
  + MedQA: medical question answering
  + Social IQA: social commonsense intelligence
  + BoolQ: natural questions from context
  + TruthfulQA: grounded reasoning
+ Language understanding:
  + HellaSwag: commonsense natural language inference around everyday events
  + ANLI: adversarial natural language inference
+ Function calling:
  + Berkeley function calling benchmark: function and tool calls
  + Internal function calling benchmarks
+ World knowledge:
  + TriviaQA: trivia questions on general topics
+ Math:
  + GSM8K: grade-school math word problems
  + GSM8K Hard: grade-school math word problems with large values and some absurdity
  + MATH: challenging competition math problems
+ Code:
  + HumanEval, HumanEval+, MBPP, MBPP+: Python coding tasks
  + LiveCodeBench, LiveBench: contamination-free code tasks
  + BigCode Bench: challenging programming tasks
  + Spider: SQL query tasks
  + Internal coding benchmarks
+ Instruction following:
  + IFEval: verifiable instructions
  + Internal instruction-following benchmarks
+ Multilingual:
  + MGSM: multilingual grade-school math
  + Multilingual MMLU and MMLU-pro
  + MEGA: multilingual NLP tasks
+ Popular aggregated datasets: MMLU, MMLU-pro, BigBench-Hard, AGI Eval
+ Multi-turn conversations:
  + Data generated by in-house adversarial conversation simulation tool
+ Single-turn trustworthiness evaluation:
  + DecodingTrust: a collection of trustworthiness benchmarks in eight different perspectives
  + XSTest: exaggerated safety evaluation
  + Toxigen: adversarial and hate speech detection
+ Red Team:
  + Responses to prompts provided by the AI Red Team at Microsoft
SECURITY.md
ADDED
@@ -0,0 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.9 BLOCK -->

## Security

Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/Microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet) and [Xamarin](https://github.com/xamarin).

If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/security.md/definition), please report it to us as described below.

## Reporting Security Issues

**Please do not report security vulnerabilities through public GitHub issues.**

Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/security.md/msrc/create-report).

If you prefer to submit without logging in, send email to [[email protected]](mailto:[email protected]). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/security.md/msrc/pgp).

You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://www.microsoft.com/msrc).

Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:

* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue

This information will help us triage your report more quickly.

If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/security.md/msrc/bounty) page for more details about our active programs.

## Preferred Languages

We prefer all communications to be in English.

## Policy

Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/security.md/cvd).

<!-- END MICROSOFT SECURITY.MD BLOCK -->
added_tokens.json
ADDED
@@ -0,0 +1,12 @@
{
  "<|/tool_call|>": 200026,
  "<|/tool|>": 200024,
  "<|assistant|>": 200019,
  "<|end|>": 200020,
  "<|system|>": 200022,
  "<|tag|>": 200028,
  "<|tool_call|>": 200025,
  "<|tool_response|>": 200027,
  "<|tool|>": 200023,
  "<|user|>": 200021
}
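These are the chat and tool-calling control tokens referenced in the README's input formats. A minimal sketch (assuming the tokenizer loads as in the README examples) of checking that they map to the IDs listed above:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")

for token in ["<|system|>", "<|user|>", "<|assistant|>", "<|end|>", "<|tool|>", "<|/tool|>"]:
    print(token, tokenizer.convert_tokens_to_ids(token))
# Expected per this file, e.g. <|assistant|> -> 200019 and <|end|> -> 200020.
```
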
config.json
ADDED
@@ -0,0 +1,144 @@
{
  "_name_or_path": "Phi-4-mini-instruct",
  "architectures": [
    "Phi3ForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "auto_map": {
    "AutoConfig": "configuration_phi3.Phi3Config",
    "AutoModelForCausalLM": "modeling_phi3.Phi3ForCausalLM",
    "AutoTokenizer": "Xenova/gpt-4o"
  },
  "bos_token_id": 199999,
  "embd_pdrop": 0.0,
  "eos_token_id": 199999,
  "full_attn_mod": 1,
  "hidden_act": "silu",
  "hidden_size": 3072,
  "initializer_range": 0.02,
  "intermediate_size": 8192,
  "interpolate_factor": 1,
  "lm_head_bias": false,
  "max_position_embeddings": 131072,
  "mlp_bias": false,
  "model_type": "phi3",
  "num_attention_heads": 24,
  "num_hidden_layers": 32,
  "num_key_value_heads": 8,
  "original_max_position_embeddings": 4096,
  "pad_token_id": 199999,
  "partial_rotary_factor": 0.75,
  "resid_pdrop": 0.0,
  "rms_norm_eps": 1e-05,
  "rope_scaling": {
    "long_factor": [
      1, 1.118320672, 1.250641126, 1.398617824, 1.564103225, 1.74916897, 1.956131817, 2.187582649,
      2.446418898, 2.735880826, 3.059592084, 3.421605075, 3.826451687, 4.279200023, 4.785517845,
      5.351743533, 5.984965424, 6.693110555, 7.485043894, 8.370679318, 9.36110372, 10.4687158,
      11.70738129, 13.09260651, 14.64173252, 16.37415215, 18.31155283, 20.47818807, 22.90118105,
      25.61086418, 28.64115884, 32.03, 32.1, 32.13, 32.23, 32.6, 32.61, 32.64, 32.66, 32.7, 32.71,
      32.93, 32.97, 33.28, 33.49, 33.5, 44.16, 47.77
    ],
    "short_factor": [
      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0,
      1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0
    ],
    "type": "longrope"
  },
  "rope_theta": 10000.0,
  "sliding_window": 262144,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers_version": "4.45.0",
  "use_cache": true,
  "vocab_size": 200064
}
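A minimal sanity-check sketch of how this configuration hangs together: `configuration_phi3.py` (below) requires `short_factor`/`long_factor` to have length `rotary_ndims // 2`, and the values here satisfy that. The arithmetic uses only values from this file; the local `config.json` path is an assumption.

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

head_dim = cfg["hidden_size"] // cfg["num_attention_heads"]   # 3072 // 24 = 128
rotary_ndims = int(head_dim * cfg["partial_rotary_factor"])   # 128 * 0.75 = 96

# Phi3Config._rope_scaling_validation expects factor lists of length rotary_ndims // 2 = 48.
assert len(cfg["rope_scaling"]["short_factor"]) == rotary_ndims // 2
assert len(cfg["rope_scaling"]["long_factor"]) == rotary_ndims // 2
```
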
configuration_phi3.py
ADDED
@@ -0,0 +1,226 @@
# coding=utf-8
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Phi-3 model configuration"""

from transformers.configuration_utils import PretrainedConfig
from transformers.utils import logging


logger = logging.get_logger(__name__)


class Phi3Config(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a [`Phi3Model`]. It is used to instantiate a Phi-3
    model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
    defaults will yield a similar configuration to that of the
    [microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct).

    Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
    documentation from [`PretrainedConfig`] for more information.

    Args:
        vocab_size (`int`, *optional*, defaults to 32064):
            Vocabulary size of the Phi-3 model. Defines the number of different tokens that can be represented by the
            `inputs_ids` passed when calling [`Phi3Model`].
        hidden_size (`int`, *optional*, defaults to 3072):
            Dimension of the hidden representations.
        intermediate_size (`int`, *optional*, defaults to 8192):
            Dimension of the MLP representations.
        num_hidden_layers (`int`, *optional*, defaults to 32):
            Number of hidden layers in the Transformer decoder.
        num_attention_heads (`int`, *optional*, defaults to 32):
            Number of attention heads for each attention layer in the Transformer decoder.
        num_key_value_heads (`int`, *optional*):
            This is the number of key_value heads that should be used to implement Grouped Query Attention. If
            `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if
            `num_key_value_heads=1` the model will use Multi Query Attention (MQA) otherwise GQA is used. When
            converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
            by meanpooling all the original heads within that group. For more details checkout [this
            paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to
            `num_attention_heads`.
        resid_pdrop (`float`, *optional*, defaults to 0.0):
            Dropout probability for mlp outputs.
        embd_pdrop (`int`, *optional*, defaults to 0.0):
            The dropout ratio for the embeddings.
        attention_dropout (`float`, *optional*, defaults to 0.0):
            The dropout ratio after computing the attention scores.
        hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
            The non-linear activation function (function or string) in the decoder.
        max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model might ever be used with.
        original_max_position_embeddings (`int`, *optional*, defaults to 4096):
            The maximum sequence length that this model was trained with. This is used to determine the size of the
            original RoPE embeddings when using long scaling.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        rms_norm_eps (`float`, *optional*, defaults to 1e-05):
            The epsilon value used for the RMSNorm.
        use_cache (`bool`, *optional*, defaults to `True`):
            Whether or not the model should return the last key/values attentions (not used by all models). Only
            relevant if `config.is_decoder=True`.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings or not.
        rope_theta (`float`, *optional*, defaults to 10000.0):
            The base period of the RoPE embeddings.
        rope_scaling (`dict`, *optional*):
            The scaling strategy for the RoPE embeddings. If `None`, no scaling is applied. If a dictionary, it must
            contain the following keys: `type`, `short_factor` and `long_factor`. The `type` must be `longrope` and
            the `short_factor` and `long_factor` must be lists of numbers with the same length as the hidden size
            divided by the number of attention heads divided by 2.
        partial_rotary_factor (`float`, *optional*, defaults to 1.0):
            Percentage of the query and keys which will have rotary embedding. Must be between 0.0 and 1.0.
        bos_token_id (`int`, *optional*, defaults to 1):
            The id of the "beginning-of-sequence" token.
        eos_token_id (`int`, *optional*, defaults to 32000):
            The id of the "end-of-sequence" token.
        pad_token_id (`int`, *optional*, defaults to 32000):
            The id of the padding token.
        sliding_window (`int`, *optional*):
            Sliding window attention window size. If `None`, no sliding window is applied.

    Example:

    ```python
    >>> from transformers import Phi3Model, Phi3Config

    >>> # Initializing a Phi-3 style configuration
    >>> configuration = Phi3Config.from_pretrained("microsoft/Phi-3-mini-4k-instruct")

    >>> # Initializing a model from the configuration
    >>> model = Phi3Model(configuration)

    >>> # Accessing the model configuration
    >>> configuration = model.config
    ```"""

    model_type = "phi3"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32064,
        hidden_size=3072,
        intermediate_size=8192,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attention_dropout=0.0,
        hidden_act="silu",
        max_position_embeddings=4096,
        original_max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-5,
        use_cache=True,
        tie_word_embeddings=False,
        rope_theta=10000.0,
        rope_scaling=None,
        partial_rotary_factor=1.0,
        bos_token_id=1,
        eos_token_id=32000,
        pad_token_id=32000,
        sliding_window=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attention_dropout = attention_dropout
        self.hidden_act = hidden_act
        self.max_position_embeddings = max_position_embeddings
        self.original_max_position_embeddings = original_max_position_embeddings
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.rope_theta = rope_theta
        self.rope_scaling = rope_scaling
        self.partial_rotary_factor = partial_rotary_factor
        self._rope_scaling_adjustment()
        self._rope_scaling_validation()
        self.sliding_window = sliding_window

        super().__init__(
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            pad_token_id=pad_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_adjustment(self):
        """
        Adjust the `type` of the `rope_scaling` configuration for backward compatibility.
        """
        if self.rope_scaling is None:
            return

        rope_scaling_type = self.rope_scaling.get("type", None)

        # For backward compatibility if previous version used "su" or "yarn"
        if rope_scaling_type is not None and rope_scaling_type in ["su", "yarn"]:
            self.rope_scaling["type"] = "longrope"

    def _rope_scaling_validation(self):
        """
        Validate the `rope_scaling` configuration.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 3:
            raise ValueError(
                "`rope_scaling` must be a dictionary with three fields, `type`, `short_factor` and `long_factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
        rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["longrope"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}")
        if not (
            isinstance(rope_scaling_short_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_short_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s short_factor field must be a list of numbers, got {rope_scaling_short_factor}"
            )
        rotary_ndims = int(self.hidden_size // self.num_attention_heads * self.partial_rotary_factor)
        if not len(rope_scaling_short_factor) == rotary_ndims // 2:
            raise ValueError(
                f"`rope_scaling`'s short_factor field must have length {rotary_ndims // 2}, got {len(rope_scaling_short_factor)}"
            )
        if not (
            isinstance(rope_scaling_long_factor, list)
            and all(isinstance(x, (int, float)) for x in rope_scaling_long_factor)
        ):
            raise ValueError(
                f"`rope_scaling`'s long_factor field must be a list of numbers, got {rope_scaling_long_factor}"
            )
        if not len(rope_scaling_long_factor) == rotary_ndims // 2:
            raise ValueError(
                f"`rope_scaling`'s long_factor field must have length {rotary_ndims // 2}, got {len(rope_scaling_long_factor)}"
            )
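A small usage sketch of the two helpers above (not part of the shipped file): a legacy `"su"` rope-scaling type is silently rewritten to `"longrope"`, and factor lists of the wrong length are rejected. The head and factor sizes below mirror this repository's `config.json`.

```python
from configuration_phi3 import Phi3Config

# "su" is accepted for backward compatibility and rewritten to "longrope".
cfg = Phi3Config(
    hidden_size=3072,
    num_attention_heads=24,
    partial_rotary_factor=0.75,
    rope_scaling={"type": "su", "short_factor": [1.0] * 48, "long_factor": [1.0] * 48},
)
print(cfg.rope_scaling["type"])  # longrope

# A factor list that is not rotary_ndims // 2 == 48 entries long raises a ValueError.
try:
    Phi3Config(
        hidden_size=3072,
        num_attention_heads=24,
        partial_rotary_factor=0.75,
        rope_scaling={"type": "longrope", "short_factor": [1.0] * 4, "long_factor": [1.0] * 48},
    )
except ValueError as err:
    print(err)
```
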
generation_config.json
ADDED
@@ -0,0 +1,10 @@
{
  "_from_model_config": true,
  "bos_token_id": 199999,
  "eos_token_id": [
    200020,
    199999
  ],
  "pad_token_id": 199999,
  "transformers_version": "4.45.0"
}
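Generation stops on either of the two EOS ids listed here: 200020 is the `<|end|>` turn terminator from `added_tokens.json`, and 199999 is the eos/pad id declared in `config.json`. A minimal sketch of inspecting this with `transformers`:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("microsoft/Phi-4-mini-instruct")
print(gen_cfg.eos_token_id)  # [200020, 199999]: <|end|> and the base eos/pad token
print(gen_cfg.pad_token_id)  # 199999
```
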
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
model-00001-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bc703090b63eda16f639fa4de7ac54635c23105ab1da2f6ec4d3403151d38ee6
size 4903637712
model-00002-of-00002.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7ff79b9d2d31076bac2663393451f6530f4fc8ca49b09002116c92c373dba983
size 2768428504
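These are Git LFS pointer files; the `oid` lines carry the SHA-256 digests of the actual weight shards. A minimal sketch for verifying a downloaded shard against its pointer (the local file paths are assumptions; adjust them to wherever the shards were downloaded):

```python
import hashlib


def sha256_of(path, chunk_size=1 << 20):
    """Stream the file and return its hex SHA-256 digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()


# Compare against the oid sha256 values in the LFS pointer files above.
print(sha256_of("model-00001-of-00002.safetensors"))
print(sha256_of("model-00002-of-00002.safetensors"))
```
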
model.safetensors.index.json
ADDED
@@ -0,0 +1,201 @@
{
  "metadata": {
    "total_size": 7672043520
  },
  "weight_map": {
    "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.0.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.1.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.10.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.11.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.12.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.13.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.14.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.15.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
    "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
56 |
+
"model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
57 |
+
"model.layers.16.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
|
58 |
+
"model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
59 |
+
"model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
60 |
+
"model.layers.16.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
61 |
+
"model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
62 |
+
"model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
63 |
+
"model.layers.17.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
|
64 |
+
"model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
65 |
+
"model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
66 |
+
"model.layers.17.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
67 |
+
"model.layers.18.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
68 |
+
"model.layers.18.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
69 |
+
"model.layers.18.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
70 |
+
"model.layers.18.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
71 |
+
"model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
72 |
+
"model.layers.18.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
73 |
+
"model.layers.19.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
74 |
+
"model.layers.19.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
75 |
+
"model.layers.19.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
76 |
+
"model.layers.19.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
77 |
+
"model.layers.19.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
78 |
+
"model.layers.19.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
79 |
+
"model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
80 |
+
"model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
81 |
+
"model.layers.2.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
|
82 |
+
"model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
83 |
+
"model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
84 |
+
"model.layers.2.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
85 |
+
"model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
86 |
+
"model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
87 |
+
"model.layers.20.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
88 |
+
"model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
89 |
+
"model.layers.20.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
90 |
+
"model.layers.20.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
91 |
+
"model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
92 |
+
"model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
93 |
+
"model.layers.21.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
94 |
+
"model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
95 |
+
"model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
96 |
+
"model.layers.21.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
97 |
+
"model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
98 |
+
"model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
99 |
+
"model.layers.22.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
100 |
+
"model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
101 |
+
"model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
102 |
+
"model.layers.22.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
103 |
+
"model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
104 |
+
"model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
105 |
+
"model.layers.23.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
106 |
+
"model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
107 |
+
"model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
108 |
+
"model.layers.23.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
109 |
+
"model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
110 |
+
"model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
111 |
+
"model.layers.24.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
112 |
+
"model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
113 |
+
"model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
114 |
+
"model.layers.24.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
115 |
+
"model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
116 |
+
"model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
117 |
+
"model.layers.25.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
118 |
+
"model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
119 |
+
"model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
120 |
+
"model.layers.25.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
121 |
+
"model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
122 |
+
"model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
123 |
+
"model.layers.26.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
124 |
+
"model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
125 |
+
"model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
126 |
+
"model.layers.26.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
127 |
+
"model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
128 |
+
"model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
129 |
+
"model.layers.27.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
130 |
+
"model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
131 |
+
"model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
132 |
+
"model.layers.27.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
133 |
+
"model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
134 |
+
"model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
135 |
+
"model.layers.28.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
136 |
+
"model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
137 |
+
"model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
138 |
+
"model.layers.28.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
139 |
+
"model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
140 |
+
"model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
141 |
+
"model.layers.29.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
142 |
+
"model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
143 |
+
"model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
144 |
+
"model.layers.29.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
145 |
+
"model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
146 |
+
"model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
147 |
+
"model.layers.3.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
|
148 |
+
"model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
149 |
+
"model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
150 |
+
"model.layers.3.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
151 |
+
"model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
152 |
+
"model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
153 |
+
"model.layers.30.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
154 |
+
"model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
155 |
+
"model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
156 |
+
"model.layers.30.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
157 |
+
"model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
|
158 |
+
"model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
|
159 |
+
"model.layers.31.mlp.gate_up_proj.weight": "model-00002-of-00002.safetensors",
|
160 |
+
"model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
|
161 |
+
"model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
|
162 |
+
"model.layers.31.self_attn.qkv_proj.weight": "model-00002-of-00002.safetensors",
|
163 |
+
"model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
164 |
+
"model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
165 |
+
"model.layers.4.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
|
166 |
+
"model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
167 |
+
"model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
168 |
+
"model.layers.4.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
169 |
+
"model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
170 |
+
"model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
171 |
+
"model.layers.5.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
|
172 |
+
"model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
173 |
+
"model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
174 |
+
"model.layers.5.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
175 |
+
"model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
176 |
+
"model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
177 |
+
"model.layers.6.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
|
178 |
+
"model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
179 |
+
"model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
180 |
+
"model.layers.6.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
181 |
+
"model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
182 |
+
"model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
183 |
+
"model.layers.7.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
|
184 |
+
"model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
185 |
+
"model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
186 |
+
"model.layers.7.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
187 |
+
"model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
188 |
+
"model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
189 |
+
"model.layers.8.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
|
190 |
+
"model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
191 |
+
"model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
192 |
+
"model.layers.8.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
193 |
+
"model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
|
194 |
+
"model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
|
195 |
+
"model.layers.9.mlp.gate_up_proj.weight": "model-00001-of-00002.safetensors",
|
196 |
+
"model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
|
197 |
+
"model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
|
198 |
+
"model.layers.9.self_attn.qkv_proj.weight": "model-00001-of-00002.safetensors",
|
199 |
+
"model.norm.weight": "model-00002-of-00002.safetensors"
|
200 |
+
}
|
201 |
+
}
|
modeling_phi3.py
ADDED
@@ -0,0 +1,1180 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# coding=utf-8
|
2 |
+
# Copyright 2024 Microsoft and the HuggingFace Inc. team. All rights reserved.
|
3 |
+
#
|
4 |
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
+
# you may not use this file except in compliance with the License.
|
6 |
+
# You may obtain a copy of the License at
|
7 |
+
#
|
8 |
+
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
+
#
|
10 |
+
# Unless required by applicable law or agreed to in writing, software
|
11 |
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
+
# See the License for the specific language governing permissions and
|
14 |
+
# limitations under the License.
|
15 |
+
|
16 |
+
"""PyTorch Phi-3 model."""
|
17 |
+
|
18 |
+
from typing import Callable, List, Optional, Tuple, Union
|
19 |
+
|
20 |
+
import torch
|
21 |
+
from torch import nn
|
22 |
+
|
23 |
+
from transformers.activations import ACT2FN
|
24 |
+
from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
|
25 |
+
from transformers.generation import GenerationMixin
|
26 |
+
from transformers.modeling_attn_mask_utils import AttentionMaskConverter
|
27 |
+
from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
|
28 |
+
from transformers.modeling_outputs import (
|
29 |
+
BaseModelOutputWithPast,
|
30 |
+
CausalLMOutputWithPast,
|
31 |
+
SequenceClassifierOutputWithPast,
|
32 |
+
TokenClassifierOutput,
|
33 |
+
)
|
34 |
+
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
|
35 |
+
from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
|
36 |
+
from transformers.processing_utils import Unpack
|
37 |
+
from transformers.utils import (
|
38 |
+
LossKwargs,
|
39 |
+
add_code_sample_docstrings,
|
40 |
+
add_start_docstrings,
|
41 |
+
add_start_docstrings_to_model_forward,
|
42 |
+
logging,
|
43 |
+
replace_return_docstrings,
|
44 |
+
)
|
45 |
+
from transformers.utils.deprecation import deprecate_kwarg
|
46 |
+
from .configuration_phi3 import Phi3Config
|
47 |
+
|
48 |
+
|
49 |
+
logger = logging.get_logger(__name__)
|
50 |
+
|
51 |
+
_CHECKPOINT_FOR_DOC = "microsoft/Phi-3-mini-4k-instruct"
|
52 |
+
_CONFIG_FOR_DOC = "Phi3Config"
|
53 |
+
|
54 |
+
|
55 |
+
class Phi3MLP(nn.Module):
|
56 |
+
def __init__(self, config):
|
57 |
+
super().__init__()
|
58 |
+
|
59 |
+
self.config = config
|
60 |
+
self.gate_up_proj = nn.Linear(config.hidden_size, 2 * config.intermediate_size, bias=False)
|
61 |
+
self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
|
62 |
+
self.activation_fn = ACT2FN[config.hidden_act]
|
63 |
+
|
64 |
+
def forward(self, hidden_states: torch.FloatTensor) -> torch.FloatTensor:
|
65 |
+
up_states = self.gate_up_proj(hidden_states)
|
66 |
+
|
67 |
+
gate, up_states = up_states.chunk(2, dim=-1)
|
68 |
+
up_states = up_states * self.activation_fn(gate)
|
69 |
+
|
70 |
+
return self.down_proj(up_states)
|
71 |
+
|
72 |
+
|
73 |
+
def rotate_half(x):
|
74 |
+
"""Rotates half the hidden dims of the input."""
|
75 |
+
x1 = x[..., : x.shape[-1] // 2]
|
76 |
+
x2 = x[..., x.shape[-1] // 2 :]
|
77 |
+
return torch.cat((-x2, x1), dim=-1)
|
78 |
+
|
79 |
+
|
80 |
+
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
|
81 |
+
"""
|
82 |
+
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
|
83 |
+
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
|
84 |
+
"""
|
85 |
+
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
|
86 |
+
if n_rep == 1:
|
87 |
+
return hidden_states
|
88 |
+
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
|
89 |
+
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
|
90 |
+
|
91 |
+
|
92 |
+
def eager_attention_forward(
|
93 |
+
module: nn.Module,
|
94 |
+
query: torch.Tensor,
|
95 |
+
key: torch.Tensor,
|
96 |
+
value: torch.Tensor,
|
97 |
+
attention_mask: Optional[torch.Tensor],
|
98 |
+
scaling: float,
|
99 |
+
dropout: float = 0.0,
|
100 |
+
**kwargs,
|
101 |
+
):
|
102 |
+
key_states = repeat_kv(key, module.num_key_value_groups)
|
103 |
+
value_states = repeat_kv(value, module.num_key_value_groups)
|
104 |
+
|
105 |
+
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
|
106 |
+
if attention_mask is not None:
|
107 |
+
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
|
108 |
+
attn_weights = attn_weights + causal_mask
|
109 |
+
|
110 |
+
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
|
111 |
+
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
|
112 |
+
attn_output = torch.matmul(attn_weights, value_states)
|
113 |
+
attn_output = attn_output.transpose(1, 2).contiguous()
|
114 |
+
|
115 |
+
return attn_output, attn_weights
|
116 |
+
|
117 |
+
|
118 |
+
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
|
119 |
+
"""Applies Rotary Position Embedding to the query and key tensors.
|
120 |
+
|
121 |
+
Args:
|
122 |
+
q (`torch.Tensor`): The query tensor.
|
123 |
+
k (`torch.Tensor`): The key tensor.
|
124 |
+
cos (`torch.Tensor`): The cosine part of the rotary embedding.
|
125 |
+
sin (`torch.Tensor`): The sine part of the rotary embedding.
|
126 |
+
position_ids (`torch.Tensor`, *optional*):
|
127 |
+
Deprecated and unused.
|
128 |
+
unsqueeze_dim (`int`, *optional*, defaults to 1):
|
129 |
+
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
|
130 |
+
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
|
131 |
+
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
|
132 |
+
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
|
133 |
+
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
|
134 |
+
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
|
135 |
+
Returns:
|
136 |
+
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
|
137 |
+
"""
|
138 |
+
cos = cos.unsqueeze(unsqueeze_dim)
|
139 |
+
sin = sin.unsqueeze(unsqueeze_dim)
|
140 |
+
|
141 |
+
rotary_dim = cos.shape[-1]
|
142 |
+
q_rot, q_pass = q[..., :rotary_dim], q[..., rotary_dim:]
|
143 |
+
k_rot, k_pass = k[..., :rotary_dim], k[..., rotary_dim:]
|
144 |
+
|
145 |
+
q_embed = torch.cat([(q_rot * cos) + (rotate_half(q_rot) * sin), q_pass], dim=-1)
|
146 |
+
k_embed = torch.cat([(k_rot * cos) + (rotate_half(k_rot) * sin), k_pass], dim=-1)
|
147 |
+
return q_embed, k_embed
|
148 |
+
|
149 |
+
|
150 |
+
class Phi3Attention(nn.Module):
|
151 |
+
"""Multi-headed attention from 'Attention Is All You Need' paper"""
|
152 |
+
|
153 |
+
def __init__(self, config: Phi3Config, layer_idx: Optional[int] = None):
|
154 |
+
super().__init__()
|
155 |
+
self.config = config
|
156 |
+
self.layer_idx = layer_idx
|
157 |
+
self.head_dim = getattr(config, "head_dim", config.hidden_size // config.num_attention_heads)
|
158 |
+
self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
|
159 |
+
self.num_key_value_heads = config.num_key_value_heads
|
160 |
+
self.scaling = self.head_dim**-0.5
|
161 |
+
self.attention_dropout = config.attention_dropout
|
162 |
+
self.is_causal = True
|
163 |
+
|
164 |
+
op_size = config.num_attention_heads * self.head_dim + 2 * (config.num_key_value_heads * self.head_dim)
|
165 |
+
self.o_proj = nn.Linear(config.num_attention_heads * self.head_dim, config.hidden_size, bias=False)
|
166 |
+
self.qkv_proj = nn.Linear(config.hidden_size, op_size, bias=False)
|
167 |
+
|
168 |
+
def forward(
|
169 |
+
self,
|
170 |
+
hidden_states: torch.Tensor,
|
171 |
+
position_embeddings: Tuple[torch.Tensor, torch.Tensor],
|
172 |
+
attention_mask: Optional[torch.Tensor],
|
173 |
+
past_key_value: Optional[Cache] = None,
|
174 |
+
cache_position: Optional[torch.LongTensor] = None,
|
175 |
+
**kwargs: Unpack[FlashAttentionKwargs],
|
176 |
+
) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
|
177 |
+
input_shape = hidden_states.shape[:-1]
|
178 |
+
hidden_shape = (*input_shape, -1, self.head_dim)
|
179 |
+
|
180 |
+
qkv = self.qkv_proj(hidden_states)
|
181 |
+
query_pos = self.config.num_attention_heads * self.head_dim
|
182 |
+
query_states = qkv[..., :query_pos]
|
183 |
+
key_states = qkv[..., query_pos : query_pos + self.num_key_value_heads * self.head_dim]
|
184 |
+
value_states = qkv[..., query_pos + self.num_key_value_heads * self.head_dim :]
|
185 |
+
|
186 |
+
query_states = query_states.view(hidden_shape).transpose(1, 2)
|
187 |
+
key_states = key_states.view(hidden_shape).transpose(1, 2)
|
188 |
+
value_states = value_states.view(hidden_shape).transpose(1, 2)
|
189 |
+
|
190 |
+
cos, sin = position_embeddings
|
191 |
+
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
|
192 |
+
|
193 |
+
if past_key_value is not None:
|
194 |
+
# sin and cos are specific to RoPE models; cache_position needed for the static cache
|
195 |
+
cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
|
196 |
+
key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
|
197 |
+
|
198 |
+
attention_interface: Callable = eager_attention_forward
|
199 |
+
if self.config._attn_implementation != "eager":
|
200 |
+
if self.config._attn_implementation == "sdpa" and kwargs.get("output_attentions", False):
|
201 |
+
logger.warning_once(
|
202 |
+
"`torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to "
|
203 |
+
'eager attention. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
|
204 |
+
)
|
205 |
+
else:
|
206 |
+
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
|
207 |
+
|
208 |
+
attn_output, attn_weights = attention_interface(
|
209 |
+
self,
|
210 |
+
query_states,
|
211 |
+
key_states,
|
212 |
+
value_states,
|
213 |
+
attention_mask,
|
214 |
+
dropout=0.0 if not self.training else self.attention_dropout,
|
215 |
+
scaling=self.scaling,
|
216 |
+
sliding_window=getattr(self.config, "sliding_window", None),
|
217 |
+
**kwargs,
|
218 |
+
)
|
219 |
+
|
220 |
+
attn_output = attn_output.reshape(*input_shape, -1).contiguous()
|
221 |
+
attn_output = self.o_proj(attn_output)
|
222 |
+
return attn_output, attn_weights
|
223 |
+
|
224 |
+
|
225 |
+
class Phi3RMSNorm(nn.Module):
|
226 |
+
def __init__(self, hidden_size, eps=1e-6):
|
227 |
+
"""
|
228 |
+
Phi3RMSNorm is equivalent to T5LayerNorm
|
229 |
+
"""
|
230 |
+
super().__init__()
|
231 |
+
self.weight = nn.Parameter(torch.ones(hidden_size))
|
232 |
+
self.variance_epsilon = eps
|
233 |
+
|
234 |
+
def forward(self, hidden_states):
|
235 |
+
input_dtype = hidden_states.dtype
|
236 |
+
hidden_states = hidden_states.to(torch.float32)
|
237 |
+
variance = hidden_states.pow(2).mean(-1, keepdim=True)
|
238 |
+
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
|
239 |
+
return self.weight * hidden_states.to(input_dtype)
|
240 |
+
|
241 |
+
def extra_repr(self):
|
242 |
+
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
|
243 |
+
|
244 |
+
|
245 |
+
class Phi3DecoderLayer(nn.Module):
|
246 |
+
def __init__(self, config: Phi3Config, layer_idx: int):
|
247 |
+
super().__init__()
|
248 |
+
self.hidden_size = config.hidden_size
|
249 |
+
self.self_attn = Phi3Attention(config=config, layer_idx=layer_idx)
|
250 |
+
self.mlp = Phi3MLP(config)
|
251 |
+
self.input_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
252 |
+
self.post_attention_layernorm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
253 |
+
self.config = config
|
254 |
+
self.resid_attn_dropout = nn.Dropout(config.resid_pdrop)
|
255 |
+
self.resid_mlp_dropout = nn.Dropout(config.resid_pdrop)
|
256 |
+
|
257 |
+
def forward(
|
258 |
+
self,
|
259 |
+
hidden_states: torch.Tensor,
|
260 |
+
attention_mask: Optional[torch.Tensor] = None,
|
261 |
+
position_ids: Optional[torch.LongTensor] = None,
|
262 |
+
past_key_value: Optional[Cache] = None,
|
263 |
+
output_attentions: Optional[bool] = False,
|
264 |
+
use_cache: Optional[bool] = False,
|
265 |
+
cache_position: Optional[torch.LongTensor] = None,
|
266 |
+
position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # necessary, but kept here for BC
|
267 |
+
**kwargs: Unpack[FlashAttentionKwargs],
|
268 |
+
) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
|
269 |
+
"""
|
270 |
+
Args:
|
271 |
+
hidden_states (`torch.FloatTensor`):
|
272 |
+
input to the layer of shape `(batch, seq_len, embed_dim)`
|
273 |
+
attention_mask (`torch.FloatTensor`, *optional*): attention mask of size
|
274 |
+
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
|
275 |
+
position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
|
276 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range
|
277 |
+
`[0, config.n_positions - 1]`. [What are position IDs?](../glossary#position-ids)
|
278 |
+
past_key_value (`Cache`, *optional*): cached past key and value projection states
|
279 |
+
output_attentions (`bool`, *optional*):
|
280 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
|
281 |
+
returned tensors for more detail.
|
282 |
+
use_cache (`bool`, *optional*):
|
283 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
|
284 |
+
(see `past_key_values`).
|
285 |
+
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
|
286 |
+
Indices depicting the position of the input sequence tokens in the sequence
|
287 |
+
kwargs (`dict`, *optional*):
|
288 |
+
Arbitrary kwargs to be ignored, used for FSDP and other methods that injects code
|
289 |
+
into the model
|
290 |
+
"""
|
291 |
+
residual = hidden_states
|
292 |
+
|
293 |
+
hidden_states = self.input_layernorm(hidden_states)
|
294 |
+
|
295 |
+
# Self Attention
|
296 |
+
hidden_states, self_attn_weights = self.self_attn(
|
297 |
+
hidden_states=hidden_states,
|
298 |
+
attention_mask=attention_mask,
|
299 |
+
position_ids=position_ids,
|
300 |
+
past_key_value=past_key_value,
|
301 |
+
output_attentions=output_attentions,
|
302 |
+
use_cache=use_cache,
|
303 |
+
cache_position=cache_position,
|
304 |
+
position_embeddings=position_embeddings,
|
305 |
+
**kwargs,
|
306 |
+
)
|
307 |
+
hidden_states = residual + self.resid_attn_dropout(hidden_states) # main diff with Llama
|
308 |
+
|
309 |
+
residual = hidden_states
|
310 |
+
hidden_states = self.post_attention_layernorm(hidden_states)
|
311 |
+
hidden_states = self.mlp(hidden_states)
|
312 |
+
hidden_states = residual + self.resid_mlp_dropout(hidden_states) # main diff with Llama
|
313 |
+
|
314 |
+
outputs = (hidden_states,)
|
315 |
+
if output_attentions:
|
316 |
+
outputs += (self_attn_weights,)
|
317 |
+
|
318 |
+
return outputs
|
319 |
+
|
320 |
+
|
321 |
+
class Phi3RotaryEmbedding(nn.Module):
|
322 |
+
def __init__(self, config: Phi3Config, device=None):
|
323 |
+
super().__init__()
|
324 |
+
# BC: "rope_type" was originally "type"
|
325 |
+
if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
|
326 |
+
self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
|
327 |
+
else:
|
328 |
+
self.rope_type = "default"
|
329 |
+
self.max_seq_len_cached = config.max_position_embeddings
|
330 |
+
self.original_max_seq_len = config.max_position_embeddings
|
331 |
+
|
332 |
+
self.config = config
|
333 |
+
self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
|
334 |
+
|
335 |
+
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
|
336 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False)
|
337 |
+
self.original_inv_freq = self.inv_freq
|
338 |
+
|
339 |
+
def _dynamic_frequency_update(self, position_ids, device):
|
340 |
+
"""
|
341 |
+
dynamic RoPE layers should recompute `inv_freq` in the following situations:
|
342 |
+
1 - growing beyond the cached sequence length (allow scaling)
|
343 |
+
2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
|
344 |
+
"""
|
345 |
+
seq_len = torch.max(position_ids) + 1
|
346 |
+
if seq_len > self.max_seq_len_cached: # growth
|
347 |
+
inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, seq_len=seq_len)
|
348 |
+
self.register_buffer("inv_freq", inv_freq, persistent=False) # TODO joao: may break with compilation
|
349 |
+
self.max_seq_len_cached = seq_len
|
350 |
+
|
351 |
+
if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len: # reset
|
352 |
+
# This .to() is needed if the model has been moved to a device after being initialized (because
|
353 |
+
# the buffer is automatically moved, but not the original copy)
|
354 |
+
self.original_inv_freq = self.original_inv_freq.to(device)
|
355 |
+
self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
|
356 |
+
self.max_seq_len_cached = self.original_max_seq_len
|
357 |
+
|
358 |
+
@torch.no_grad()
|
359 |
+
def forward(self, x, position_ids):
|
360 |
+
if "dynamic" in self.rope_type:
|
361 |
+
self._dynamic_frequency_update(position_ids, device=x.device)
|
362 |
+
elif self.rope_type == "longrope":
|
363 |
+
self._longrope_frequency_update(position_ids, device=x.device)
|
364 |
+
|
365 |
+
# Core RoPE block
|
366 |
+
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
|
367 |
+
position_ids_expanded = position_ids[:, None, :].float()
|
368 |
+
# Force float32 (see https://github.com/huggingface/transformers/pull/29285)
|
369 |
+
device_type = x.device.type
|
370 |
+
device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
|
371 |
+
with torch.autocast(device_type=device_type, enabled=False):
|
372 |
+
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
|
373 |
+
emb = torch.cat((freqs, freqs), dim=-1)
|
374 |
+
cos = emb.cos()
|
375 |
+
sin = emb.sin()
|
376 |
+
|
377 |
+
# Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
|
378 |
+
cos = cos * self.attention_scaling
|
379 |
+
sin = sin * self.attention_scaling
|
380 |
+
|
381 |
+
return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
|
382 |
+
|
383 |
+
def _longrope_frequency_update(self, position_ids, device):
|
384 |
+
"""Longrope uses long factor if sequence is larger than original pretraining length, short otherwise."""
|
385 |
+
seq_len = torch.max(position_ids) + 1
|
386 |
+
if hasattr(self.config, "original_max_position_embeddings"):
|
387 |
+
original_max_position_embeddings = self.config.original_max_position_embeddings
|
388 |
+
else:
|
389 |
+
original_max_position_embeddings = self.config.max_position_embeddings
|
390 |
+
if seq_len > original_max_position_embeddings:
|
391 |
+
if not hasattr(self, "long_inv_freq"):
|
392 |
+
self.long_inv_freq, _ = self.rope_init_fn(
|
393 |
+
self.config, device, seq_len=original_max_position_embeddings + 1
|
394 |
+
)
|
395 |
+
self.register_buffer("inv_freq", self.long_inv_freq, persistent=False)
|
396 |
+
else:
|
397 |
+
# This .to() is needed if the model has been moved to a device after being initialized (because
|
398 |
+
# the buffer is automatically moved, but not the original copy)
|
399 |
+
self.original_inv_freq = self.original_inv_freq.to(device)
|
400 |
+
self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
|
401 |
+
|
402 |
+
|
403 |
+
PHI3_START_DOCSTRING = r"""
|
404 |
+
This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
|
405 |
+
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
|
406 |
+
etc.)
|
407 |
+
|
408 |
+
This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
|
409 |
+
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
|
410 |
+
and behavior.
|
411 |
+
|
412 |
+
Parameters:
|
413 |
+
config ([`Phi3Config`]):
|
414 |
+
Model configuration class with all the parameters of the model. Initializing with a config file does not
|
415 |
+
load the weights associated with the model, only the configuration. Check out the
|
416 |
+
[`~PreTrainedModel.from_pretrained`] method to load the model weights.
|
417 |
+
"""
|
418 |
+
|
419 |
+
|
420 |
+
@add_start_docstrings(
|
421 |
+
"The bare Phi3 Model outputting raw hidden-states without any specific head on top.",
|
422 |
+
PHI3_START_DOCSTRING,
|
423 |
+
)
|
424 |
+
class Phi3PreTrainedModel(PreTrainedModel):
|
425 |
+
config_class = Phi3Config
|
426 |
+
base_model_prefix = "model"
|
427 |
+
supports_gradient_checkpointing = True
|
428 |
+
_no_split_modules = ["Phi3DecoderLayer"]
|
429 |
+
_skip_keys_device_placement = ["past_key_values"]
|
430 |
+
_supports_flash_attn_2 = True
|
431 |
+
_supports_sdpa = True
|
432 |
+
_supports_flex_attn = True
|
433 |
+
_supports_cache_class = True
|
434 |
+
_supports_quantized_cache = True
|
435 |
+
_supports_static_cache = True
|
436 |
+
_supports_attention_backend = True
|
437 |
+
_version = "0.0.5"
|
438 |
+
|
439 |
+
def _init_weights(self, module):
|
440 |
+
std = self.config.initializer_range
|
441 |
+
if isinstance(module, nn.Linear):
|
442 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
443 |
+
if module.bias is not None:
|
444 |
+
module.bias.data.zero_()
|
445 |
+
elif isinstance(module, nn.Embedding):
|
446 |
+
module.weight.data.normal_(mean=0.0, std=std)
|
447 |
+
if module.padding_idx is not None:
|
448 |
+
module.weight.data[module.padding_idx].zero_()
|
449 |
+
|
450 |
+
|
451 |
+
PHI3_INPUTS_DOCSTRING = r"""
|
452 |
+
Args:
|
453 |
+
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
|
454 |
+
Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
|
455 |
+
it.
|
456 |
+
|
457 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
458 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
459 |
+
|
460 |
+
[What are input IDs?](../glossary#input-ids)
|
461 |
+
attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
|
462 |
+
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
|
463 |
+
|
464 |
+
- 1 for tokens that are **not masked**,
|
465 |
+
- 0 for tokens that are **masked**.
|
466 |
+
|
467 |
+
[What are attention masks?](../glossary#attention-mask)
|
468 |
+
|
469 |
+
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
|
470 |
+
[`PreTrainedTokenizer.__call__`] for details.
|
471 |
+
|
472 |
+
If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
|
473 |
+
`past_key_values`).
|
474 |
+
|
475 |
+
If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
|
476 |
+
and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
|
477 |
+
information on the default strategy.
|
478 |
+
|
479 |
+
- 1 indicates the head is **not masked**,
|
480 |
+
- 0 indicates the head is **masked**.
|
481 |
+
position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
|
482 |
+
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
|
483 |
+
config.n_positions - 1]`.
|
484 |
+
|
485 |
+
[What are position IDs?](../glossary#position-ids)
|
486 |
+
past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
|
487 |
+
Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
|
488 |
+
blocks) that can be used to speed up sequential decoding. This typically consists in the `past_key_values`
|
489 |
+
returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
|
490 |
+
|
491 |
+
Two formats are allowed:
|
492 |
+
- a [`~cache_utils.Cache`] instance, see our
|
493 |
+
[kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache);
|
494 |
+
- Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
|
495 |
+
shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
|
496 |
+
cache format.
|
497 |
+
|
498 |
+
The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
|
499 |
+
legacy cache format will be returned.
|
500 |
+
|
501 |
+
If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
|
502 |
+
have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
|
503 |
+
of shape `(batch_size, sequence_length)`.
|
504 |
+
inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
|
505 |
+
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
|
506 |
+
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
|
507 |
+
model's internal embedding lookup matrix.
|
508 |
+
use_cache (`bool`, *optional*):
|
509 |
+
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
|
510 |
+
`past_key_values`).
|
511 |
+
output_attentions (`bool`, *optional*):
|
512 |
+
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
|
513 |
+
tensors for more detail.
|
514 |
+
output_hidden_states (`bool`, *optional*):
|
515 |
+
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
|
516 |
+
more detail.
|
517 |
+
return_dict (`bool`, *optional*):
|
518 |
+
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
|
519 |
+
cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
|
520 |
+
Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
|
521 |
+
this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
|
522 |
+
the complete sequence length.
|
523 |
+
"""
|
524 |
+
|
525 |
+
|
526 |
+
@add_start_docstrings(
|
527 |
+
"The bare Phi3 Model outputting raw hidden-states without any specific head on top.",
|
528 |
+
PHI3_START_DOCSTRING,
|
529 |
+
)
|
530 |
+
class Phi3Model(Phi3PreTrainedModel):
|
531 |
+
"""
|
532 |
+
Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`Phi3DecoderLayer`]
|
533 |
+
|
534 |
+
Args:
|
535 |
+
config: Phi3Config
|
536 |
+
"""
|
537 |
+
|
538 |
+
def __init__(self, config: Phi3Config):
|
539 |
+
super().__init__(config)
|
540 |
+
self.padding_idx = config.pad_token_id
|
541 |
+
self.vocab_size = config.vocab_size
|
542 |
+
|
543 |
+
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
|
544 |
+
self.layers = nn.ModuleList(
|
545 |
+
[Phi3DecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
|
546 |
+
)
|
547 |
+
self.norm = Phi3RMSNorm(config.hidden_size, eps=config.rms_norm_eps)
|
548 |
+
self.rotary_emb = Phi3RotaryEmbedding(config=config)
|
549 |
+
self.gradient_checkpointing = False
|
550 |
+
|
551 |
+
# Initialize weights and apply final processing
|
552 |
+
self.post_init()
|
553 |
+
|
554 |
+
def get_input_embeddings(self):
|
555 |
+
return self.embed_tokens
|
556 |
+
|
557 |
+
def set_input_embeddings(self, value):
|
558 |
+
self.embed_tokens = value
|
559 |
+
|
560 |
+
@add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
|
561 |
+
def forward(
|
562 |
+
self,
|
563 |
+
input_ids: torch.LongTensor = None,
|
564 |
+
attention_mask: Optional[torch.Tensor] = None,
|
565 |
+
position_ids: Optional[torch.LongTensor] = None,
|
566 |
+
past_key_values: Optional[Cache] = None,
|
567 |
+
inputs_embeds: Optional[torch.FloatTensor] = None,
|
568 |
+
use_cache: Optional[bool] = None,
|
569 |
+
output_attentions: Optional[bool] = None,
|
570 |
+
output_hidden_states: Optional[bool] = None,
|
571 |
+
return_dict: Optional[bool] = None,
|
572 |
+
cache_position: Optional[torch.LongTensor] = None,
|
573 |
+
**flash_attn_kwargs: Unpack[FlashAttentionKwargs],
|
574 |
+
) -> Union[Tuple, BaseModelOutputWithPast]:
|
575 |
+
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
|
576 |
+
output_hidden_states = (
|
577 |
+
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
|
578 |
+
)
|
579 |
+
use_cache = use_cache if use_cache is not None else self.config.use_cache
|
580 |
+
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
|
581 |
+
|
582 |
+
if (input_ids is None) ^ (inputs_embeds is not None):
|
583 |
+
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
|
584 |
+
|
585 |
+
if self.gradient_checkpointing and self.training and use_cache:
|
586 |
+
logger.warning_once(
|
587 |
+
"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
|
588 |
+
)
|
589 |
+
use_cache = False
|
590 |
+
|
591 |
+
if inputs_embeds is None:
|
592 |
+
inputs_embeds = self.embed_tokens(input_ids)
|
593 |
+
|
594 |
+
if use_cache and past_key_values is None:
|
595 |
+
past_key_values = DynamicCache()
|
596 |
+
|
597 |
+
if cache_position is None:
|
598 |
+
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
|
599 |
+
cache_position = torch.arange(
|
600 |
+
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
|
601 |
+
)
|
602 |
+
|
603 |
+
if position_ids is None:
|
604 |
+
position_ids = cache_position.unsqueeze(0)
|
605 |
+
|
606 |
+
causal_mask = self._update_causal_mask(
|
607 |
+
attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
|
608 |
+
)
|
609 |
+
|
610 |
+
hidden_states = inputs_embeds
|
611 |
+
|
612 |
+
# create position embeddings to be shared across the decoder layers
|
613 |
+
position_embeddings = self.rotary_emb(hidden_states, position_ids)
|
614 |
+
|
615 |
+
# decoder layers
|
616 |
+
all_hidden_states = () if output_hidden_states else None
|
617 |
+
all_self_attns = () if output_attentions else None
|
618 |
+
|
619 |
+
for decoder_layer in self.layers[: self.config.num_hidden_layers]:
|
620 |
+
if output_hidden_states:
|
621 |
+
all_hidden_states += (hidden_states,)
|
622 |
+
|
623 |
+
if self.gradient_checkpointing and self.training:
|
624 |
+
layer_outputs = self._gradient_checkpointing_func(
|
625 |
+
decoder_layer.__call__,
|
626 |
+
hidden_states,
|
627 |
+
causal_mask,
|
628 |
+
position_ids,
|
629 |
+
past_key_values,
|
630 |
+
output_attentions,
|
631 |
+
use_cache,
|
632 |
+
cache_position,
|
633 |
+
position_embeddings,
|
634 |
+
)
|
635 |
+
else:
|
636 |
+
layer_outputs = decoder_layer(
|
637 |
+
hidden_states,
|
638 |
+
attention_mask=causal_mask,
|
639 |
+
position_ids=position_ids,
|
640 |
+
past_key_value=past_key_values,
|
641 |
+
output_attentions=output_attentions,
|
642 |
+
use_cache=use_cache,
|
643 |
+
cache_position=cache_position,
|
644 |
+
position_embeddings=position_embeddings,
|
645 |
+
**flash_attn_kwargs,
|
646 |
+
)
|
647 |
+
|
648 |
+
hidden_states = layer_outputs[0]
|
649 |
+
|
650 |
+
if output_attentions:
|
651 |
+
all_self_attns += (layer_outputs[1],)
|
652 |
+
|
653 |
+
hidden_states = self.norm(hidden_states)
|
654 |
+
|
655 |
+
# add hidden states from the last decoder layer
|
656 |
+
if output_hidden_states:
|
657 |
+
all_hidden_states += (hidden_states,)
|
658 |
+
|
659 |
+
output = BaseModelOutputWithPast(
|
660 |
+
last_hidden_state=hidden_states,
|
661 |
+
past_key_values=past_key_values if use_cache else None,
|
662 |
+
hidden_states=all_hidden_states,
|
663 |
+
attentions=all_self_attns,
|
664 |
+
)
|
665 |
+
return output if return_dict else output.to_tuple()
|
666 |
+
|
667 |
+
    def _update_causal_mask(
        self,
        attention_mask: torch.Tensor,
        input_tensor: torch.Tensor,
        cache_position: torch.Tensor,
        past_key_values: Cache,
        output_attentions: bool,
    ):
        if self.config._attn_implementation == "flash_attention_2":
            if attention_mask is not None and past_key_values is not None:
                is_padding_right = attention_mask[:, -1].sum().item() != input_tensor.size()[0]
                if is_padding_right:
                    raise ValueError(
                        "You are attempting to perform batched generation with padding_side='right'"
                        " this may lead to unexpected behaviour for Flash Attention version of Phi3. Make sure to "
                        " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
                    )
            if attention_mask is not None and 0.0 in attention_mask:
                return attention_mask
            return None

        # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
        # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
        # to infer the attention mask.
        past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
        using_static_cache = isinstance(past_key_values, StaticCache)
        using_sliding_window_cache = isinstance(past_key_values, SlidingWindowCache)

        # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
        if (
            self.config._attn_implementation == "sdpa"
            and not (using_static_cache or using_sliding_window_cache)
            and not output_attentions
        ):
            if AttentionMaskConverter._ignore_causal_mask_sdpa(
                attention_mask,
                inputs_embeds=input_tensor,
                past_key_values_length=past_seen_tokens,
                sliding_window=self.config.sliding_window,
                is_training=self.training,
            ):
                return None

        dtype, device = input_tensor.dtype, input_tensor.device
        min_dtype = torch.finfo(dtype).min
        sequence_length = input_tensor.shape[1]
        # SlidingWindowCache or StaticCache
        if using_sliding_window_cache or using_static_cache:
            target_length = past_key_values.get_max_cache_shape()
        # DynamicCache or no cache
        else:
            target_length = (
                attention_mask.shape[-1]
                if isinstance(attention_mask, torch.Tensor)
                else past_seen_tokens + sequence_length + 1
            )

        # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
        causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
            attention_mask,
            sequence_length=sequence_length,
            target_length=target_length,
            dtype=dtype,
            device=device,
            cache_position=cache_position,
            batch_size=input_tensor.shape[0],
            config=self.config,
            past_key_values=past_key_values,
        )

        if (
            self.config._attn_implementation == "sdpa"
            and attention_mask is not None
            and attention_mask.device.type in ["cuda", "xpu"]
            and not output_attentions
        ):
            # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
            # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
            # Details: https://github.com/pytorch/pytorch/issues/110213
            causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)

        return causal_mask

    @staticmethod
    def _prepare_4d_causal_attention_mask_with_cache_position(
        attention_mask: torch.Tensor,
        sequence_length: int,
        target_length: int,
        dtype: torch.dtype,
        device: torch.device,
        cache_position: torch.Tensor,
        batch_size: int,
        config: Phi3Config,
        past_key_values: Cache,
    ):
        """
        Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
        `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.

        Args:
            attention_mask (`torch.Tensor`):
                A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
            sequence_length (`int`):
                The sequence length being processed.
            target_length (`int`):
                The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
            dtype (`torch.dtype`):
                The dtype to use for the 4D attention mask.
            device (`torch.device`):
                The device to place the 4D attention mask on.
            cache_position (`torch.Tensor`):
                Indices depicting the position of the input sequence tokens in the sequence.
            batch_size (`torch.Tensor`):
                Batch size.
            config (`Phi3Config`):
                The model's configuration class
            past_key_values (`Cache`):
                The cache class that is being used currently to generate
        """
        if attention_mask is not None and attention_mask.dim() == 4:
            # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
            causal_mask = attention_mask
        else:
            min_dtype = torch.finfo(dtype).min
            causal_mask = torch.full(
                (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
            )
            diagonal_attend_mask = torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
            if config.sliding_window is not None:
                # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
                # the check is needed to verify if the current checkpoint was trained with sliding window or not
                if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
                    sliding_attend_mask = torch.arange(target_length, device=device) <= (
                        cache_position.reshape(-1, 1) - config.sliding_window
                    )
                    diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
            causal_mask *= diagonal_attend_mask
            causal_mask = causal_mask[None, None, :, :].expand(batch_size, 1, -1, -1)
            if attention_mask is not None:
                causal_mask = causal_mask.clone()  # copy to contiguous memory for in-place edit
                if attention_mask.shape[-1] > target_length:
                    attention_mask = attention_mask[:, :target_length]
                mask_length = attention_mask.shape[-1]
                padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
                    causal_mask.device
                )
                padding_mask = padding_mask == 0
                causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
                    padding_mask, min_dtype
                )
        return causal_mask


class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs): ...


class Phi3ForCausalLM(Phi3PreTrainedModel, GenerationMixin):
    _tied_weights_keys = ["lm_head.weight"]
    _tp_plan = {"lm_head": "colwise_rep"}
    _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}

    def __init__(self, config):
        super().__init__(config)
        self.model = Phi3Model(config)
        self.vocab_size = config.vocab_size
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, new_embeddings):
        self.lm_head = new_embeddings

    def set_decoder(self, decoder):
        self.model = decoder

    def get_decoder(self):
        return self.model

    @deprecate_kwarg("num_logits_to_keep", version="4.50", new_name="logits_to_keep")
    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: torch.LongTensor = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        cache_position: Optional[torch.LongTensor] = None,
        logits_to_keep: Union[int, torch.Tensor] = 0,
        **kwargs: Unpack[KwargsForCausalLM],
    ) -> Union[Tuple, CausalLMOutputWithPast]:
        r"""
        Args:
            labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
                config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
                (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.

            logits_to_keep (`int` or `torch.Tensor`, *optional*):
                If an `int`, compute logits for the last `logits_to_keep` tokens. If `0`, calculate logits for all
                `input_ids` (special case). Only last token logits are needed for generation, and calculating them only for that
                token can save memory, which becomes pretty significant for long sequences or large vocabulary size.
                If a `torch.Tensor`, must be 1D corresponding to the indices to keep in the sequence length dimension.
                This is useful when using packed tensor format (single dimension for batch and sequence length).

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, Phi3ForCausalLM

        >>> model = Phi3ForCausalLM.from_pretrained("microsoft/Phi-4-mini-instruct")
        >>> tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")

        >>> prompt = "Hey, are you conscious? Can you talk to me?"
        >>> inputs = tokenizer(prompt, return_tensors="pt")

        >>> # Generate
        >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
        >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
        "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
        ```"""
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
        outputs = self.model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.lm_head(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return (loss,) + output if loss is not None else output

        return CausalLMOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def prepare_inputs_for_generation(
        self,
        input_ids,
        past_key_values=None,
        attention_mask=None,
        inputs_embeds=None,
        cache_position=None,
        position_ids=None,
        use_cache=True,
        logits_to_keep=None,
        **kwargs,
    ):
        # Overwritten -- this model may need to switch between short and long rope, invalidating the cache in the
        # process

        # The first time the input length reaches the switching point between the short and long rope factors,
        # force the cache to be re-computed. This slows down that single token position, but it is better than the
        # failure that would otherwise occur.
        if (
            past_key_values
            and self.config.rope_scaling
            and input_ids.shape[1] >= self.config.original_max_position_embeddings + 1
        ):
            past_length = cache_position[0]
            if past_length <= self.config.original_max_position_embeddings:
                past_key_values = None

        model_inputs = super().prepare_inputs_for_generation(
            input_ids=input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            cache_position=cache_position,
            position_ids=position_ids,
            use_cache=use_cache,
            logits_to_keep=logits_to_keep,
            **kwargs,
        )
        return model_inputs


@add_start_docstrings(
    """
    The Phi3 Model transformer with a sequence classification head on top (linear layer).

    [`Phi3ForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it requires to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """,
    PHI3_START_DOCSTRING,
)
class Phi3ForSequenceClassification(Phi3PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Phi3Model(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutputWithPast]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        logits = self.score(hidden_states)

        if input_ids is not None:
            batch_size = input_ids.shape[0]
        else:
            batch_size = inputs_embeds.shape[0]

        if self.config.pad_token_id is None and batch_size != 1:
            raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
        if self.config.pad_token_id is None:
            last_non_pad_token = -1
        elif input_ids is not None:
            # To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
            non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
            token_indices = torch.arange(input_ids.shape[-1], device=logits.device)
            last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
        else:
            last_non_pad_token = -1
            logger.warning_once(
                f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                "unexpected if using padding tokens in conjunction with `inputs_embeds.`"
            )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)

        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


@add_start_docstrings(
    """
    The Phi3 Model transformer with a token classification head on top (a linear layer on top of the hidden-states
    output) e.g. for Named-Entity-Recognition (NER) tasks.
    """,
    PHI3_START_DOCSTRING,
)
class Phi3ForTokenClassification(Phi3PreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.model = Phi3Model(config)
        if getattr(config, "classifier_dropout", None) is not None:
            classifier_dropout = config.classifier_dropout
        elif getattr(config, "hidden_dropout", None) is not None:
            classifier_dropout = config.hidden_dropout
        else:
            classifier_dropout = 0.1
        self.dropout = nn.Dropout(classifier_dropout)
        self.score = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.model.embed_tokens

    def set_input_embeddings(self, value):
        self.model.embed_tokens = value

    @add_start_docstrings_to_model_forward(PHI3_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        past_key_values: Optional[List[torch.FloatTensor]] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.model(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            past_key_values=past_key_values,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        logits = self.score(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(logits, labels, self.config)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
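The sliding-window branch of `_prepare_4d_causal_attention_mask_with_cache_position` above is easiest to see on a toy case. The following standalone sketch is not part of the shipped files; the sizes and the window of 3 are invented for illustration (the real values come from `config.sliding_window` and the cache length). It mirrors the same `torch.arange` / `cache_position` arithmetic and prints which key positions each query is allowed to attend to.

```python
import torch

# Toy sizes, purely illustrative
sequence_length, target_length, sliding_window = 5, 5, 3
dtype = torch.float32
min_dtype = torch.finfo(dtype).min
cache_position = torch.arange(sequence_length)  # no past tokens in this toy prefill

# Start fully masked, then mark which positions must stay masked
causal_mask = torch.full((sequence_length, target_length), fill_value=min_dtype, dtype=dtype)
diagonal_attend_mask = torch.arange(target_length) > cache_position.reshape(-1, 1)  # future tokens
sliding_attend_mask = torch.arange(target_length) <= (cache_position.reshape(-1, 1) - sliding_window)  # too far back
diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
causal_mask *= diagonal_attend_mask  # masked positions keep min_dtype, allowed ones become 0

print((causal_mask == 0).int())
# tensor([[1, 0, 0, 0, 0],
#         [1, 1, 0, 0, 0],
#         [1, 1, 1, 0, 0],
#         [0, 1, 1, 1, 0],
#         [0, 0, 1, 1, 1]], dtype=torch.int32)
# Each query attends to at most the last `sliding_window` keys, never to the future.
```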
sample_finetune.py
ADDED
@@ -0,0 +1,214 @@
import sys
import logging

import datasets
from datasets import load_dataset
from peft import LoraConfig
import torch
import transformers
from trl import SFTTrainer
from transformers import AutoModelForCausalLM, AutoTokenizer, TrainingArguments, BitsAndBytesConfig

"""
A simple example of using SFTTrainer and Accelerate to finetune the Phi-4-Mini-Instruct model. For
a more advanced example, please follow HF alignment-handbook/scripts/run_sft.py.
This example uses DeepSpeed ZeRO3 offload to reduce memory usage. The
script can be run on V100 or later generation GPUs. Here are some suggestions on
further reducing memory consumption:
    - reduce batch size
    - decrease lora dimension
    - restrict lora target modules
Please follow these steps to run the script:
1. Install dependencies:
    conda install -c conda-forge accelerate=1.3.0
    pip3 install -i https://pypi.org/simple/ bitsandbytes
    pip3 install peft==0.14.0
    pip3 install transformers==4.48.1
    pip3 install trl datasets
    pip3 install deepspeed
2. Setup accelerate and deepspeed config based on the machine used:
    accelerate config
    Here is a sample config for deepspeed zero3:
        compute_environment: LOCAL_MACHINE
        debug: false
        deepspeed_config:
            gradient_accumulation_steps: 1
            offload_optimizer_device: none
            offload_param_device: none
            zero3_init_flag: true
            zero3_save_16bit_model: true
            zero_stage: 3
        distributed_type: DEEPSPEED
        downcast_bf16: 'no'
        enable_cpu_affinity: false
        machine_rank: 0
        main_training_function: main
        mixed_precision: bf16
        num_machines: 1
        num_processes: 4
        rdzv_backend: static
        same_network: true
        tpu_env: []
        tpu_use_cluster: false
        tpu_use_sudo: false
        use_cpu: false
3. check accelerate config:
    accelerate env
4. Run the code:
    accelerate launch sample_finetune.py
"""

logger = logging.getLogger(__name__)


###################
# Hyper-parameters
###################
training_config = {
    "bf16": True,
    "do_eval": False,
    "learning_rate": 5.0e-06,
    "log_level": "info",
    "logging_steps": 20,
    "logging_strategy": "steps",
    "lr_scheduler_type": "cosine",
    "num_train_epochs": 1,
    "max_steps": -1,
    "output_dir": "./checkpoint_dir",
    "overwrite_output_dir": True,
    "per_device_eval_batch_size": 4,
    "per_device_train_batch_size": 4,
    "remove_unused_columns": True,
    "save_steps": 100,
    "save_total_limit": 1,
    "seed": 0,
    "gradient_checkpointing": True,
    "gradient_checkpointing_kwargs": {"use_reentrant": False},
    "gradient_accumulation_steps": 1,
    "warmup_ratio": 0.2,
}

peft_config = {
    "r": 16,
    "lora_alpha": 32,
    "lora_dropout": 0.05,
    "bias": "none",
    "task_type": "CAUSAL_LM",
    "target_modules": "all-linear",
    "modules_to_save": None,
}
train_conf = TrainingArguments(**training_config)
peft_conf = LoraConfig(**peft_config)


###############
# Setup logging
###############
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = train_conf.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()

# Log on each process a small summary
logger.warning(
    f"Process rank: {train_conf.local_rank}, device: {train_conf.device}, n_gpu: {train_conf.n_gpu}"
    + f" distributed training: {bool(train_conf.local_rank != -1)}, 16-bits training: {train_conf.fp16}"
)
logger.info(f"Training/evaluation parameters {train_conf}")
logger.info(f"PEFT parameters {peft_conf}")


################
# Model Loading
################
checkpoint_path = "microsoft/Phi-4-mini-instruct"
model_kwargs = dict(
    use_cache=False,
    trust_remote_code=True,
    attn_implementation="flash_attention_2",  # load the model with flash-attention support
    torch_dtype=torch.bfloat16,
    device_map=None
)
model = AutoModelForCausalLM.from_pretrained(checkpoint_path, **model_kwargs)
tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
tokenizer.model_max_length = 2048
tokenizer.pad_token = tokenizer.unk_token  # use unk rather than eos token to prevent endless generation
tokenizer.pad_token_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
tokenizer.padding_side = 'right'


##################
# Data Processing
##################
def apply_chat_template(
    example,
    tokenizer,
):
    messages = example["messages"]
    example["text"] = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=False)
    return example


train_dataset, test_dataset = load_dataset("HuggingFaceH4/ultrachat_200k", split=["train_sft", "test_sft"])
column_names = list(train_dataset.features)

processed_train_dataset = train_dataset.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
    desc="Applying chat template to train_sft",
)

processed_test_dataset = test_dataset.map(
    apply_chat_template,
    fn_kwargs={"tokenizer": tokenizer},
    num_proc=10,
    remove_columns=column_names,
    desc="Applying chat template to test_sft",
)


###########
# Training
###########
trainer = SFTTrainer(
    model=model,
    args=train_conf,
    peft_config=peft_conf,
    train_dataset=processed_train_dataset,
    eval_dataset=processed_test_dataset,
    max_seq_length=2048,
    dataset_text_field="text",
    tokenizer=tokenizer,
    packing=True
)
train_result = trainer.train()
metrics = train_result.metrics
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()


#############
# Evaluation
#############
tokenizer.padding_side = 'left'
metrics = trainer.evaluate()
metrics["eval_samples"] = len(processed_test_dataset)
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)


# ############
# # Save model
# ############
trainer.save_model(train_conf.output_dir)
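For reference, here is a minimal sketch of what the `apply_chat_template` step above produces for one record. The example messages are invented; ultrachat_200k rows carry a `messages` list of this shape, and the rendering is driven by the `chat_template` defined in tokenizer_config.json below.

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")

example = {
    "messages": [
        {"role": "user", "content": "What is the capital of France?"},
        {"role": "assistant", "content": "The capital of France is Paris."},
    ]
}

# Training-time rendering: no generation prompt, so the template closes with the eos token.
text = tokenizer.apply_chat_template(example["messages"], tokenize=False, add_generation_prompt=False)
print(text)
# <|user|>What is the capital of France?<|end|><|assistant|>The capital of France is Paris.<|end|><|endoftext|>
```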
special_tokens_map.json
ADDED
@@ -0,0 +1,30 @@
{
  "bos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
tokenizer.json
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:382cc235b56c725945e149cc25f191da667c836655efd0857b004320e90e91ea
size 15524095
tokenizer_config.json
ADDED
@@ -0,0 +1,111 @@
{
  "add_bos_token": false,
  "add_eos_token": false,
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "199999": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200018": {
      "content": "<|endofprompt|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "200019": {
      "content": "<|assistant|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "200020": {
      "content": "<|end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "200021": {
      "content": "<|user|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "200022": {
      "content": "<|system|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    },
    "200023": {
      "content": "<|tool|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "200024": {
      "content": "<|/tool|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "200025": {
      "content": "<|tool_call|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "200026": {
      "content": "<|/tool_call|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "200027": {
      "content": "<|tool_response|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": false
    },
    "200028": {
      "content": "<|tag|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": true,
      "single_word": false,
      "special": true
    }
  },
  "bos_token": "<|endoftext|>",
  "chat_template": "{% for message in messages %}{% if message['role'] == 'system' and 'tools' in message and message['tools'] is not none %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|tool|>' + message['tools'] + '<|/tool|>' + '<|end|>' }}{% else %}{{ '<|' + message['role'] + '|>' + message['content'] + '<|end|>' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ '<|assistant|>' }}{% else %}{{ eos_token }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|endoftext|>",
  "model_max_length": 131072,
  "pad_token": "<|endoftext|>",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>"
}
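The `chat_template` above also covers the inference-time case: with `add_generation_prompt=True` the rendered string ends with `<|assistant|>` instead of the eos token, cueing the model to generate the reply. A small sketch (the prompt text is invented; only the token layout is taken from the template above):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-4-mini-instruct")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize the MIT license in one sentence."},
]

# Inference-time rendering: the trailing <|assistant|> marks where generation should start.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# <|system|>You are a helpful assistant.<|end|><|user|>Summarize the MIT license in one sentence.<|end|><|assistant|>
```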
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff