Upload 13 files
- README.md +229 -0
- config.json +39 -0
- generation_config.json +6 -0
- merges.txt +0 -0
- pytorch_model-00001-of-00004.bin +3 -0
- pytorch_model-00002-of-00004.bin +3 -0
- pytorch_model-00003-of-00004.bin +3 -0
- pytorch_model-00004-of-00004.bin +3 -0
- pytorch_model.bin.index.json +516 -0
- tokenizer.json +0 -0
- tokenizer_config.json +30 -0
- vocab.json +0 -0
README.md
ADDED
@@ -0,0 +1,229 @@
---
pipeline_tag: text-generation
inference: true
widget:
- text: 'def print_hello_world():'
  example_title: Hello world
  group: Python
license: bigcode-openrail-m
datasets:
- bigcode/the-stack-dedup
metrics:
- code_eval
library_name: transformers
tags:
- code
model-index:
- name: StarCoder-7B
  results:
  - task:
      type: text-generation
    dataset:
      type: openai_humaneval
      name: HumanEval
    metrics:
    - name: pass@1
      type: pass@1
      value: 28.37
      verified: false
  - task:
      type: text-generation
    dataset:
      type: nuprl/MultiPL-E
      name: MultiPL-HumanEval (C++)
    metrics:
    - name: pass@1
      type: pass@1
      value: 23.3
      verified: false
  - task:
      type: text-generation
    dataset:
      type: nuprl/MultiPL-E
      name: MultiPL-HumanEval (Java)
    metrics:
    - name: pass@1
      type: pass@1
      value: 24.44
      verified: false
  - task:
      type: text-generation
    dataset:
      type: nuprl/MultiPL-E
      name: MultiPL-HumanEval (JavaScript)
    metrics:
    - name: pass@1
      type: pass@1
      value: 27.35
      verified: false
  - task:
      type: text-generation
    dataset:
      type: nuprl/MultiPL-E
      name: MultiPL-HumanEval (PHP)
    metrics:
    - name: pass@1
      type: pass@1
      value: 22.12
      verified: false
  - task:
      type: text-generation
    dataset:
      type: nuprl/MultiPL-E
      name: MultiPL-HumanEval (Lua)
    metrics:
    - name: pass@1
      type: pass@1
      value: 23.35
      verified: false
  - task:
      type: text-generation
    dataset:
      type: nuprl/MultiPL-E
      name: MultiPL-HumanEval (Rust)
    metrics:
    - name: pass@1
      type: pass@1
      value: 22.6
      verified: false
  - task:
      type: text-generation
    dataset:
      type: nuprl/MultiPL-E
      name: MultiPL-HumanEval (Swift)
    metrics:
    - name: pass@1
      type: pass@1
      value: 15.1
      verified: false
  - task:
      type: text-generation
    dataset:
      type: nuprl/MultiPL-E
      name: MultiPL-HumanEval (Julia)
    metrics:
    - name: pass@1
      type: pass@1
      value: 21.77
      verified: false
  - task:
      type: text-generation
    dataset:
      type: nuprl/MultiPL-E
      name: MultiPL-HumanEval (R)
    metrics:
    - name: pass@1
      type: pass@1
      value: 14.51
      verified: false
extra_gated_prompt: >-
  ## Model License Agreement

  Please read the BigCode [OpenRAIL-M
  license](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement)
  agreement before accepting it.
extra_gated_fields:
  I accept the above license agreement, and will use the Model complying with the set of use restrictions and sharing requirements: checkbox
duplicated_from: bigcode-data/starcoderbase-7b
---

# StarCoderBase-7B

7B version of [StarCoderBase](https://huggingface.co/bigcode/starcoderbase).

## Table of Contents

1. [Model Summary](#model-summary)
2. [Use](#use)
3. [Limitations](#limitations)
4. [Training](#training)
5. [License](#license)
6. [Citation](#citation)

## Model Summary

StarCoderBase-7B is a 7B-parameter model trained on 80+ programming languages from [The Stack (v1.2)](https://huggingface.co/datasets/bigcode/the-stack), with opt-out requests excluded. The model uses [Multi Query Attention](https://arxiv.org/abs/1911.02150), [a context window of 8192 tokens](https://arxiv.org/abs/2205.14135), and was trained using the [Fill-in-the-Middle objective](https://arxiv.org/abs/2207.14255) on 1 trillion tokens.

- **Repository:** [bigcode/Megatron-LM](https://github.com/bigcode-project/Megatron-LM)
- **Project Website:** [bigcode-project.org](https://www.bigcode-project.org)
- **Paper:** [💫StarCoder: May the source be with you!](https://arxiv.org/abs/2305.06161)
- **Point of Contact:** [[email protected]](mailto:[email protected])
- **Languages:** 80+ programming languages


## Use

### Intended use

The model was trained on GitHub code. As such it is _not_ an instruction model, and commands like "Write a function that computes the square root." do not work well. However, by using the [Tech Assistant prompt](https://huggingface.co/datasets/bigcode/ta-prompt) you can turn it into a capable technical assistant.

**Feel free to share your generations in the Community tab!**

### Generation
```python
# pip install -q transformers
from transformers import AutoModelForCausalLM, AutoTokenizer

checkpoint = "bigcode/starcoderbase-7b"
device = "cuda"  # for GPU usage or "cpu" for CPU usage

tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForCausalLM.from_pretrained(checkpoint).to(device)

inputs = tokenizer.encode("def print_hello_world():", return_tensors="pt").to(device)
outputs = model.generate(inputs)
print(tokenizer.decode(outputs[0]))
```
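
By default `generate` stops after a short continuation, and the full float32 checkpoint needs roughly 30 GB of memory. A minimal sketch of more practical settings (the dtype trick and sampling values below are illustrative assumptions, not tuned recommendations):

```python
import torch

# Loading in bfloat16 roughly halves memory versus the float32 default.
model = AutoModelForCausalLM.from_pretrained(checkpoint, torch_dtype=torch.bfloat16).to(device)

outputs = model.generate(
    inputs,
    max_new_tokens=64,   # the default max length cuts completions off early
    do_sample=True,
    temperature=0.2,     # illustrative sampling values, not tuned recommendations
    top_p=0.95,
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(outputs[0]))
```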

### Fill-in-the-middle
Fill-in-the-middle uses special tokens to identify the prefix/middle/suffix parts of the input and output:

```python
input_text = "<fim_prefix>def print_hello_world():\n    <fim_suffix>\n    print('Hello world!')<fim_middle>"
inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
outputs = model.generate(inputs)
print(tokenizer.decode(outputs[0]))
```
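
The decoded output echoes the whole prompt, so in practice you usually slice out just the infilled middle. A small sketch of one way to do that (the `fim_fill` helper is ours, not part of the library):

```python
def fim_fill(prefix: str, suffix: str, max_new_tokens: int = 64) -> str:
    """Return only the model's infilled middle for the given prefix/suffix."""
    prompt = f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>"
    fim_inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
    out = model.generate(fim_inputs, max_new_tokens=max_new_tokens)
    text = tokenizer.decode(out[0], skip_special_tokens=False)
    # Everything after <fim_middle> is the generated middle; trim at end-of-text.
    middle = text.split("<fim_middle>", 1)[1]
    return middle.split("<|endoftext|>", 1)[0]

print(fim_fill("def print_hello_world():\n    ", "\n    print('Hello world!')"))
```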

### Attribution & Other Requirements

The pretraining dataset of the model was filtered for permissive licenses only. Nevertheless, the model can generate source code verbatim from the dataset. The code's license might require attribution and/or other specific requirements that must be respected. We provide a [search index](https://huggingface.co/spaces/bigcode/starcoder-search) that lets you search through the pretraining data to identify where generated code came from and apply the proper attribution to your code.

# Limitations

The model has been trained on source code from 80+ programming languages. The predominant natural language in source code is English, although other languages are also present. As such, the model can generate code snippets given some context, but the generated code is not guaranteed to work as intended: it can be inefficient and can contain bugs or exploits. See [the paper](https://drive.google.com/file/d/1cN-b9GnWtHzQRoE7M7gAEyivY0kl4BYs/view) for an in-depth discussion of the model's limitations.

# Training

## Model

- **Architecture:** GPT-2 model with multi-query attention and Fill-in-the-Middle objective
- **Pretraining steps:** 250k
- **Pretraining tokens:** 1 trillion
- **Precision:** bfloat16

## Hardware

- **GPUs:** 512 Tesla A100

## Software

- **Orchestration:** [Megatron-LM](https://github.com/bigcode-project/Megatron-LM)
- **Neural networks:** [PyTorch](https://github.com/pytorch/pytorch)
- **BF16 if applicable:** [apex](https://github.com/NVIDIA/apex)

# License
The model is licensed under the BigCode OpenRAIL-M v1 license agreement. You can find the full agreement [here](https://huggingface.co/spaces/bigcode/bigcode-model-license-agreement).
# Citation
```
@article{li2023starcoder,
  title={StarCoder: may the source be with you!},
  author={Raymond Li and Loubna Ben Allal and Yangtian Zi and Niklas Muennighoff and Denis Kocetkov and Chenghao Mou and Marc Marone and Christopher Akiki and Jia Li and Jenny Chim and Qian Liu and Evgenii Zheltonozhskii and Terry Yue Zhuo and Thomas Wang and Olivier Dehaene and Mishig Davaadorj and Joel Lamy-Poirier and João Monteiro and Oleh Shliazhko and Nicolas Gontier and Nicholas Meade and Armel Zebaze and Ming-Ho Yee and Logesh Kumar Umapathi and Jian Zhu and Benjamin Lipkin and Muhtasham Oblokulov and Zhiruo Wang and Rudra Murthy and Jason Stillerman and Siva Sankalp Patel and Dmitry Abulkhanov and Marco Zocca and Manan Dey and Zhihan Zhang and Nour Fahmy and Urvashi Bhattacharyya and Wenhao Yu and Swayam Singh and Sasha Luccioni and Paulo Villegas and Maxim Kunakov and Fedor Zhdanov and Manuel Romero and Tony Lee and Nadav Timor and Jennifer Ding and Claire Schlesinger and Hailey Schoelkopf and Jan Ebert and Tri Dao and Mayank Mishra and Alex Gu and Jennifer Robinson and Carolyn Jane Anderson and Brendan Dolan-Gavitt and Danish Contractor and Siva Reddy and Daniel Fried and Dzmitry Bahdanau and Yacine Jernite and Carlos Muñoz Ferrandis and Sean Hughes and Thomas Wolf and Arjun Guha and Leandro von Werra and Harm de Vries},
  year={2023},
  eprint={2305.06161},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
```
config.json
ADDED
@@ -0,0 +1,39 @@
{
  "_name_or_path": "/fsx/bigcode/experiments/pretraining/conversions/starcoder-7b",
  "activation_function": "gelu",
  "architectures": [
    "GPTBigCodeForCausalLM"
  ],
  "attention_softmax_in_fp32": true,
  "multi_query": true,
  "attn_pdrop": 0.1,
  "bos_token_id": 0,
  "embd_pdrop": 0.1,
  "eos_token_id": 0,
  "inference_runner": 0,
  "initializer_range": 0.02,
  "layer_norm_epsilon": 1e-05,
  "max_batch_size": null,
  "max_sequence_length": null,
  "model_type": "gpt_bigcode",
  "n_embd": 4096,
  "n_head": 32,
  "n_inner": 16384,
  "n_layer": 42,
  "n_positions": 8192,
  "pad_key_length": true,
  "pre_allocate_kv_cache": false,
  "resid_pdrop": 0.1,
  "scale_attention_softmax_in_fp32": true,
  "scale_attn_weights": true,
  "summary_activation": null,
  "summary_first_dropout": 0.1,
  "summary_proj_to_labels": true,
  "summary_type": "cls_index",
  "summary_use_proj": true,
  "torch_dtype": "float32",
  "transformers_version": "4.28.1",
  "use_cache": true,
  "validate_runner_input": true,
  "vocab_size": 49152
}
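
As a sanity check, the parameter count implied by these values matches the ~30 GB of float32 shards below. A back-of-the-envelope sketch (ours, not an official calculation; biases and LayerNorms, well under 0.1% of the total, are ignored):

```python
# Rough parameter count from the config.json values above (a sketch, not an official figure).
n_embd, n_head, n_inner, n_layer = 4096, 32, 16384, 42
vocab_size, n_positions = 49152, 8192

head_dim = n_embd // n_head                # 128
c_attn = n_embd * (n_embd + 2 * head_dim)  # fused QKV; multi_query => one shared K/V head
attn_out = n_embd * n_embd                 # attention output projection
mlp = 2 * n_embd * n_inner                 # c_fc + c_proj
per_layer = c_attn + attn_out + mlp

embeddings = (vocab_size + n_positions) * n_embd
lm_head = vocab_size * n_embd              # stored as a separate tensor in this checkpoint
total = n_layer * per_layer + embeddings + lm_head
print(f"~{total / 1e9:.2f}B parameters")   # ~7.53B; x4 bytes/param ≈ the 30.1 GB shard total
```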
generation_config.json
ADDED
@@ -0,0 +1,6 @@
{
  "_from_model_config": true,
  "bos_token_id": 0,
  "eos_token_id": 0,
  "transformers_version": "4.28.1"
}
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
pytorch_model-00001-of-00004.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1bc158094d8042472ffb102358edc404e01fca915f750f6583bd0a04493ba327
size 9859125333
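
Each `.bin` entry here is a Git LFS pointer: the repository itself stores only the object's sha256 and byte size, and the actual shard is fetched from LFS storage on download. A small sketch for verifying a downloaded shard against its pointer (ours, for illustration):

```python
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            h.update(chunk)
    return h.hexdigest()

expected = "1bc158094d8042472ffb102358edc404e01fca915f750f6583bd0a04493ba327"
assert sha256_of("pytorch_model-00001-of-00004.bin") == expected
```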
pytorch_model-00002-of-00004.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:93bdbf4a261a3bdb9470cedf66bacbd998751418a807e78f1bfea9df6b6365b5
size 9993542361
pytorch_model-00003-of-00004.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:397636f10cc990cd9abb94960d71200b89ff2cb78e27900bdb21d5f25b376a4a
size 9456554659
pytorch_model-00004-of-00004.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:72bb6594879519b299cae04e8c433078eae2de78291511f1e4976a904f71bdc6
size 805307306
pytorch_model.bin.index.json
ADDED
@@ -0,0 +1,516 @@
{
  "metadata": {
    "total_size": 30114359296
  },
  "weight_map": {
    "lm_head.weight": "pytorch_model-00004-of-00004.bin",
    "transformer.h.0.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.0.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.1.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.10.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.11.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.12.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.13.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.13.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.13.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.13.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.13.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.13.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.13.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.13.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.13.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.13.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.13.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.13.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.14.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.15.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.16.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.17.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.18.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.19.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.2.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.2.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.20.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.20.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.21.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.22.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.23.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.24.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.25.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.26.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.attn.c_attn.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.attn.c_attn.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.attn.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.attn.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.ln_2.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.ln_2.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.mlp.c_fc.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.mlp.c_fc.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.mlp.c_proj.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.27.mlp.c_proj.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.28.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.28.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.28.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.28.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.28.ln_1.bias": "pytorch_model-00002-of-00004.bin",
    "transformer.h.28.ln_1.weight": "pytorch_model-00002-of-00004.bin",
    "transformer.h.28.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.28.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.28.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.28.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.28.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.28.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.29.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.3.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.3.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.30.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.30.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.31.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.32.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.33.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.34.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.35.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.36.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.37.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.38.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.39.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.4.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.4.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.40.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.40.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.attn.c_attn.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.attn.c_attn.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.attn.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.attn.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.ln_1.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.ln_1.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.ln_2.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.ln_2.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.mlp.c_fc.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.mlp.c_fc.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.mlp.c_proj.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.h.41.mlp.c_proj.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.h.5.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.5.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.6.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.7.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.8.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.attn.c_attn.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.attn.c_attn.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.attn.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.attn.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.ln_1.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.ln_1.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.ln_2.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.ln_2.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.mlp.c_fc.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.mlp.c_fc.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.mlp.c_proj.bias": "pytorch_model-00001-of-00004.bin",
    "transformer.h.9.mlp.c_proj.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.ln_f.bias": "pytorch_model-00003-of-00004.bin",
    "transformer.ln_f.weight": "pytorch_model-00003-of-00004.bin",
    "transformer.wpe.weight": "pytorch_model-00001-of-00004.bin",
    "transformer.wte.weight": "pytorch_model-00001-of-00004.bin"
  }
}
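
`from_pretrained` consults this index automatically when loading the sharded checkpoint, but the mapping is also easy to use by hand, for example to inspect a single tensor without loading all four shards. A sketch (illustrative only):

```python
import json
import torch

with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

name = "transformer.h.28.ln_1.weight"   # layer 28 straddles shards 2 and 3
shard_file = index["weight_map"][name]  # -> "pytorch_model-00002-of-00004.bin"
shard = torch.load(shard_file, map_location="cpu")
print(name, tuple(shard[name].shape))
```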
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1,30 @@
{
  "add_prefix_space": false,
  "additional_special_tokens": [
    "<|endoftext|>",
    "<fim_prefix>",
    "<fim_middle>",
    "<fim_suffix>",
    "<fim_pad>",
    "<filename>",
    "<gh_stars>",
    "<issue_start>",
    "<issue_comment>",
    "<issue_closed>",
    "<jupyter_start>",
    "<jupyter_text>",
    "<jupyter_code>",
    "<jupyter_output>",
    "<empty_output>",
    "<commit_before>",
    "<commit_msg>",
    "<commit_after>",
    "<reponame>"
  ],
  "bos_token": "<|endoftext|>",
  "eos_token": "<|endoftext|>",
  "model_max_length": 1000000000000000019884624838656,
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>",
  "vocab_size": 49152
}
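
The FIM and metadata markers above are registered as `additional_special_tokens`, so the tokenizer maps each one to a single id instead of splitting it into subwords. (The huge `model_max_length` is the transformers sentinel for "not set"; the model's actual context length is `n_positions = 8192` from config.json.) A quick sketch:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("bigcode/starcoderbase-7b")
for t in ["<fim_prefix>", "<fim_middle>", "<fim_suffix>", "<fim_pad>"]:
    print(t, tok.convert_tokens_to_ids(t))  # each marker resolves to a single token id
```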
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff