davidlvxin committed
Commit 5fded96
1 parent: b4abb7e

Upload folder using huggingface_hub

.mdl ADDED
Binary file (50 Bytes).
 
.msc ADDED
Binary file (1.11 kB).
 
.mv ADDED
@@ -0,0 +1 @@
+ Revision:master,CreatedAt:1725249855
config.json ADDED
@@ -0,0 +1,37 @@
+ {
+   "architectures": [
+     "LlamaForCausalLM"
+   ],
+   "auto_map": {
+     "AutoModelForCausalLM": "modeling_llama.LlamaForCausalLM"
+   },
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "bos_token_id": 128000,
+   "eos_token_id": 128001,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "initializer_range": 0.02,
+   "intermediate_size": 14336,
+   "max_position_embeddings": 131072,
+   "mlp_bias": false,
+   "model_type": "llama",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 8,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "factor": 8.0,
+     "low_freq_factor": 1.0,
+     "high_freq_factor": 4.0,
+     "original_max_position_embeddings": 8192,
+     "rope_type": "llama3"
+   },
+   "rope_theta": 500000.0,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.43.0.dev0",
+   "use_cache": true,
+   "vocab_size": 128256
+ }
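
The `auto_map` entry above points `AutoModelForCausalLM` at the bundled `modeling_llama.py` instead of the built-in transformers implementation, so the checkpoint must be loaded with `trust_remote_code=True`. A minimal loading sketch (the repo id below is a placeholder, not part of this commit):

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "org/model-name"  # placeholder repo id, not part of this commit
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,  # matches "torch_dtype" in config.json
    trust_remote_code=True,      # required so auto_map can import the custom modeling_llama.py
)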
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "do_sample": true,
+   "temperature": 0.6,
+   "top_p": 0.9,
+   "_from_model_config": true,
+   "bos_token_id": 128000,
+   "eos_token_id": 128001,
+   "transformers_version": "4.43.0.dev0"
+ }
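
`model.generate()` picks these sampling defaults up automatically (do_sample=True, temperature=0.6, top_p=0.9). Continuing the loading sketch above, with an illustrative prompt:

inputs = tokenizer("Hello, world.", return_tensors="pt").to(model.device)
out = model.generate(**inputs, max_new_tokens=128)  # sampling settings come from generation_config.json
print(tokenizer.decode(out[0], skip_special_tokens=True))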
model-00000-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c8096593a3e05d06f0787d80d8296042350253012febe1c354a561af9af842da
+ size 3489800320
model-00001-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ff4561ef9fd1c8c260fbe3e3d31c5037728a4735b48af3308124d25a706a317
+ size 3489800376
model-00002-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2ed4a162e0a93d2596e53712e2b67f593721dbbcb9dd9e3dcf7e7f73a7fdc3bf
+ size 3489800392
model-00003-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bfe3879dbb210af1b266c8610d4a4a35773a5c07a7136ca404f11c92c8576ef2
+ size 3489800392
model-00004-of-00005.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d89a383d572bc7dbba1f512912a7536f920ceaf33f45452991d21f829890a7a8
+ size 2101354824
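
Each `.safetensors` entry above is a Git LFS pointer rather than the weights themselves: it records the pointer-spec version, the SHA-256 of the actual blob (`oid`), and its size in bytes. A small sketch for checking a downloaded shard against its pointer, assuming the shard sits in the working directory:

import hashlib
from pathlib import Path

def verify_shard(path: str, expected_oid: str, expected_size: int) -> bool:
    """Return True if the local file matches the LFS pointer's oid and size."""
    data = Path(path).read_bytes()
    return len(data) == expected_size and hashlib.sha256(data).hexdigest() == expected_oid

# oid and size copied from the model-00000-of-00005.safetensors pointer above
print(verify_shard(
    "model-00000-of-00005.safetensors",
    "c8096593a3e05d06f0787d80d8296042350253012febe1c354a561af9af842da",
    3489800320,
))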
model.safetensors.index.json ADDED
@@ -0,0 +1,298 @@
+ {
+   "metadata": {
+     "total_size": 16060522496
+   },
+   "weight_map": {
+     "model.embed_tokens.weight": "model-00004-of-00005.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00000-of-00005.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00000-of-00005.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00005.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00001-of-00005.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00005.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00002-of-00005.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00003-of-00005.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00003-of-00005.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00003-of-00005.safetensors",
+     "model.norm.weight": "model-00004-of-00005.safetensors",
+     "lm_head.weight": "model-00004-of-00005.safetensors"
+   }
+ }
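
The `weight_map` above tells `from_pretrained` which shard holds each tensor, and `total_size` (16,060,522,496 bytes, roughly 16 GB) is consistent with an 8B-parameter model stored in bfloat16. A sketch of resolving a single tensor by hand, assuming the index file and shards sit in the working directory:

import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.0.self_attn.q_proj.weight"
shard = index["weight_map"][name]  # -> "model-00000-of-00005.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)  # loads just this tensor, not the whole shard
print(shard, tuple(tensor.shape))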
modeling_llama.py ADDED
@@ -0,0 +1,1414 @@
+ # coding=utf-8
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
+ #
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
+ # and OPT implementations in this library. It has been modified from its
+ # original forms to accommodate minor architectural differences compared
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ import math
+ from typing import Optional, Tuple, Union, List, Callable, Dict, Any
+
+ import torch
+ import torch.nn.functional as F
+ import torch.utils.checkpoint
+ from torch import nn
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
+
+ from transformers.activations import ACT2FN
+ from transformers.cache_utils import Cache, DynamicCache, StaticCache
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
+ from transformers.modeling_flash_attention_utils import _flash_attention_forward
+ from transformers.modeling_outputs import (
+     BaseModelOutputWithPast,
+     CausalLMOutputWithPast,
+     QuestionAnsweringModelOutput,
+     SequenceClassifierOutputWithPast,
+     TokenClassifierOutput,
+ )
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS
+ from transformers.modeling_utils import PreTrainedModel
+ from transformers.pytorch_utils import ALL_LAYERNORM_LAYERS
+ from transformers.utils import (
+     add_start_docstrings,
+     add_start_docstrings_to_model_forward,
+     is_flash_attn_greater_or_equal_2_10,
+     logging,
+     replace_return_docstrings,
+ )
+ from transformers import LlamaConfig
+ from nltk.tokenize import PunktSentenceTokenizer
+ import re
+
+ logger = logging.get_logger(__name__)
+
+ _CONFIG_FOR_DOC = "LlamaConfig"
+
+
+ class LlamaRMSNorm(nn.Module):
+     def __init__(self, hidden_size, eps=1e-6):
+         """
+         LlamaRMSNorm is equivalent to T5LayerNorm
+         """
+         super().__init__()
+         self.weight = nn.Parameter(torch.ones(hidden_size))
+         self.variance_epsilon = eps
+
+     def forward(self, hidden_states):
+         input_dtype = hidden_states.dtype
+         hidden_states = hidden_states.to(torch.float32)
+         variance = hidden_states.pow(2).mean(-1, keepdim=True)
+         hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
+         return self.weight * hidden_states.to(input_dtype)
+
+
+ ALL_LAYERNORM_LAYERS.append(LlamaRMSNorm)
+
+
+ class LlamaRotaryEmbedding(nn.Module):
+     def __init__(
+         self,
+         dim=None,
+         max_position_embeddings=2048,
+         base=10000,
+         device=None,
+         scaling_factor=1.0,
+         rope_type="default",
+         config: Optional[LlamaConfig] = None,
+     ):
+         super().__init__()
+         # TODO (joao): remove the `if` below, only used for BC
+         self.rope_kwargs = {}
+         if config is None:
+             logger.warning_once(
+                 "`LlamaRotaryEmbedding` can now be fully parameterized by passing the model config through the "
+                 "`config` argument. All other arguments will be removed in v4.45"
+             )
+             self.rope_kwargs = {
+                 "rope_type": rope_type,
+                 "factor": scaling_factor,
+                 "dim": dim,
+                 "base": base,
+                 "max_position_embeddings": max_position_embeddings,
+             }
+             self.rope_type = rope_type
+             self.max_seq_len_cached = max_position_embeddings
+             self.original_max_seq_len = max_position_embeddings
+         else:
+             # BC: "rope_type" was originally "type"
+             if config.rope_scaling is not None:
+                 self.rope_type = config.rope_scaling.get("rope_type", config.rope_scaling.get("type"))
+             else:
+                 self.rope_type = "default"
+             self.max_seq_len_cached = config.max_position_embeddings
+             self.original_max_seq_len = config.max_position_embeddings
+
+         self.config = config
+         self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
+
+         inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device, **self.rope_kwargs)
+         self.register_buffer("inv_freq", inv_freq, persistent=False)
+         self.original_inv_freq = self.inv_freq
+
+     def _dynamic_frequency_update(self, position_ids, device):
+         """
+         dynamic RoPE layers should recompute `inv_freq` in the following situations:
+         1 - growing beyond the cached sequence length (allow scaling)
+         2 - the current sequence length is in the original scale (avoid losing precision with small sequences)
+         """
+         seq_len = torch.max(position_ids) + 1
+         if seq_len > self.max_seq_len_cached:  # growth
+             inv_freq, self.attention_scaling = self.rope_init_fn(
+                 self.config, device, seq_len=seq_len, **self.rope_kwargs
+             )
+             self.register_buffer("inv_freq", inv_freq, persistent=False)  # TODO joao: may break with compilation
+             self.max_seq_len_cached = seq_len
+
+         if seq_len < self.original_max_seq_len and self.max_seq_len_cached > self.original_max_seq_len:  # reset
+             self.register_buffer("inv_freq", self.original_inv_freq, persistent=False)
+             self.max_seq_len_cached = self.original_max_seq_len
+
+     @torch.no_grad()
+     def forward(self, x, position_ids):
+         if "dynamic" in self.rope_type:
+             self._dynamic_frequency_update(position_ids, device=x.device)
+         # Core RoPE block
+         inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1)
+         position_ids_expanded = position_ids[:, None, :].float()
+         # Force float32 (see https://github.com/huggingface/transformers/pull/29285)
+         device_type = x.device.type
+         device_type = device_type if isinstance(device_type, str) and device_type != "mps" else "cpu"
+         with torch.autocast(device_type=device_type, enabled=False):
+             freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
+             emb = torch.cat((freqs, freqs), dim=-1)
+             cos = emb.cos()
+             sin = emb.sin()
+         # Advanced RoPE types (e.g. yarn) apply a post-processing scaling factor, equivalent to scaling attention
+         cos = cos * self.attention_scaling
+         sin = sin * self.attention_scaling
+
+         return cos.to(dtype=x.dtype), sin.to(dtype=x.dtype)
+
+
+ class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding):
+     """LlamaRotaryEmbedding extended with linear scaling. Credits to the Reddit user /u/kaiokendev"""
+
+     def __init__(self, *args, **kwargs):
+         logger.warning_once(
+             "`LlamaLinearScalingRotaryEmbedding` is deprecated and will be removed in v4.45. Please use "
+             "`LlamaRotaryEmbedding`, which now also does linear scaling (simply pass the model config to __init__)."
+         )
+         kwargs["rope_type"] = "linear"
+         super().__init__(*args, **kwargs)
+
+
+ class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding):
+     """LlamaRotaryEmbedding extended with Dynamic NTK scaling. Credits to the Reddit users /u/bloc97 and /u/emozilla"""
+
+     def __init__(self, *args, **kwargs):
+         logger.warning_once(
+             "`LlamaDynamicNTKScalingRotaryEmbedding` is deprecated and will be removed in v4.45. Please use "
+             "`LlamaRotaryEmbedding`, which now also does dynamic ntk scaling (simply pass the model config to "
+             "__init__)."
+         )
+         kwargs["rope_type"] = "dynamic"
+         super().__init__(*args, **kwargs)
+
+
+ def rotate_half(x):
+     """Rotates half the hidden dims of the input."""
+     x1 = x[..., : x.shape[-1] // 2]
+     x2 = x[..., x.shape[-1] // 2 :]
+     return torch.cat((-x2, x1), dim=-1)
+
+
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
+     """Applies Rotary Position Embedding to the query and key tensors.
+
+     Args:
+         q (`torch.Tensor`): The query tensor.
+         k (`torch.Tensor`): The key tensor.
+         cos (`torch.Tensor`): The cosine part of the rotary embedding.
+         sin (`torch.Tensor`): The sine part of the rotary embedding.
+         position_ids (`torch.Tensor`, *optional*):
+             Deprecated and unused.
+         unsqueeze_dim (`int`, *optional*, defaults to 1):
+             The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
+             sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
+             that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
+             k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
+             cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
+             the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
+     Returns:
+         `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
+     """
+     cos = cos.unsqueeze(unsqueeze_dim)
+     sin = sin.unsqueeze(unsqueeze_dim)
+
+     q_embed = (q * cos) + (rotate_half(q) * sin)
+     k_embed = (k * cos) + (rotate_half(k) * sin)
+     return q_embed, k_embed
+
+
+ class LlamaMLP(nn.Module):
+     def __init__(self, config):
+         super().__init__()
+         self.config = config
+         self.hidden_size = config.hidden_size
+         self.intermediate_size = config.intermediate_size
+         self.gate_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
+         self.up_proj = nn.Linear(self.hidden_size, self.intermediate_size, bias=config.mlp_bias)
+         self.down_proj = nn.Linear(self.intermediate_size, self.hidden_size, bias=config.mlp_bias)
+         self.act_fn = ACT2FN[config.hidden_act]
+
+     def forward(self, x):
+         if self.config.pretraining_tp > 1:
+             slice = self.intermediate_size // self.config.pretraining_tp
+             gate_proj_slices = self.gate_proj.weight.split(slice, dim=0)
+             up_proj_slices = self.up_proj.weight.split(slice, dim=0)
+             down_proj_slices = self.down_proj.weight.split(slice, dim=1)
+
+             gate_proj = torch.cat(
+                 [F.linear(x, gate_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1
+             )
+             up_proj = torch.cat([F.linear(x, up_proj_slices[i]) for i in range(self.config.pretraining_tp)], dim=-1)
+
+             intermediate_states = (self.act_fn(gate_proj) * up_proj).split(slice, dim=2)
+             down_proj = [
+                 F.linear(intermediate_states[i], down_proj_slices[i]) for i in range(self.config.pretraining_tp)
+             ]
+             down_proj = sum(down_proj)
+         else:
+             down_proj = self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
+
+         return down_proj
+
+
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
+     """
+     This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
+     num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
+     """
+     batch, num_key_value_heads, slen, head_dim = hidden_states.shape
+     if n_rep == 1:
+         return hidden_states
+     hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
+     return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
+
+
+ class LlamaAttention(nn.Module):
+     """Multi-headed attention from 'Attention Is All You Need' paper"""
+
+     def __init__(self, config: LlamaConfig, layer_idx: Optional[int] = None):
+         super().__init__()
+         self.config = config
+         self.layer_idx = layer_idx
+         if layer_idx is None:
+             logger.warning_once(
+                 f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
+                 "lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
+                 "when creating this class."
+             )
+
+         self.attention_dropout = config.attention_dropout
+         self.hidden_size = config.hidden_size
+         self.num_heads = config.num_attention_heads
+         self.head_dim = self.hidden_size // self.num_heads
+         self.num_key_value_heads = config.num_key_value_heads
+         self.num_key_value_groups = self.num_heads // self.num_key_value_heads
+         self.max_position_embeddings = config.max_position_embeddings
+         self.rope_theta = config.rope_theta
+         self.is_causal = True
+
+         if (self.head_dim * self.num_heads) != self.hidden_size:
+             raise ValueError(
+                 f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
+                 f" and `num_heads`: {self.num_heads})."
+             )
+
+         self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=config.attention_bias)
+         self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+         self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=config.attention_bias)
+         self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=config.attention_bias)
+
+         # TODO (joao): remove in v4.45 (RoPE is computed in the model, not in the decoder layers)
+         self.rotary_emb = LlamaRotaryEmbedding(config=self.config)
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Cache] = None,
+         output_attentions: bool = False,
+         use_cache: bool = False,
+         cache_position: Optional[torch.LongTensor] = None,
+         position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.45
+         **kwargs,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         bsz, q_len, _ = hidden_states.size()
+
+         if self.config.pretraining_tp > 1:
+             key_value_slicing = (self.num_key_value_heads * self.head_dim) // self.config.pretraining_tp
+             query_slices = self.q_proj.weight.split(
+                 (self.num_heads * self.head_dim) // self.config.pretraining_tp, dim=0
+             )
+             key_slices = self.k_proj.weight.split(key_value_slicing, dim=0)
+             value_slices = self.v_proj.weight.split(key_value_slicing, dim=0)
+
+             query_states = [F.linear(hidden_states, query_slices[i]) for i in range(self.config.pretraining_tp)]
+             query_states = torch.cat(query_states, dim=-1)
+
+             key_states = [F.linear(hidden_states, key_slices[i]) for i in range(self.config.pretraining_tp)]
+             key_states = torch.cat(key_states, dim=-1)
+
+             value_states = [F.linear(hidden_states, value_slices[i]) for i in range(self.config.pretraining_tp)]
+             value_states = torch.cat(value_states, dim=-1)
+
+         else:
+             query_states = self.q_proj(hidden_states)
+             key_states = self.k_proj(hidden_states)
+             value_states = self.v_proj(hidden_states)
+
+         query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+         key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+         value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+         if position_embeddings is None:
+             logger.warning_once(
+                 "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
+                 "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
+                 "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
+                 "removed and `position_embeddings` will be mandatory."
+             )
+             cos, sin = self.rotary_emb(value_states, position_ids)
+         else:
+             cos, sin = position_embeddings
+
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+         if past_key_value is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         key_states = repeat_kv(key_states, self.num_key_value_groups)
+         value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+         attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
+
+         if attention_mask is not None:  # no matter the length, we just slice it
+             causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
+             attn_weights = attn_weights + causal_mask
+
+         # upcast attention to fp32
+         attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
+         attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
+         attn_output = torch.matmul(attn_weights, value_states)
+
+         if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
+             raise ValueError(
+                 f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
+                 f" {attn_output.size()}"
+             )
+
+         attn_output = attn_output.transpose(1, 2).contiguous()
+
+         attn_output = attn_output.reshape(bsz, q_len, -1)
+
+         if self.config.pretraining_tp > 1:
+             attn_output = attn_output.split(self.hidden_size // self.config.pretraining_tp, dim=2)
+             o_proj_slices = self.o_proj.weight.split(self.hidden_size // self.config.pretraining_tp, dim=1)
+             attn_output = sum([F.linear(attn_output[i], o_proj_slices[i]) for i in range(self.config.pretraining_tp)])
+         else:
+             attn_output = self.o_proj(attn_output)
+
+         if not output_attentions:
+             attn_weights = None
+
+         return attn_output, attn_weights, past_key_value
+
+
+ class LlamaFlashAttention2(LlamaAttention):
+     """
+     Llama flash attention module. This module inherits from `LlamaAttention` as the weights of the module stay
+     untouched. The only required change would be on the forward pass where it needs to correctly call the public API of
+     flash attention and deal with padding tokens in case the input contains any of them.
+     """
+
+     def __init__(self, *args, **kwargs):
+         super().__init__(*args, **kwargs)
+
+         # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
+         # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
+         # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
+         self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.LongTensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Cache] = None,
+         output_attentions: bool = False,
+         use_cache: bool = False,
+         cache_position: Optional[torch.LongTensor] = None,
+         position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.45
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         if isinstance(past_key_value, StaticCache):
+             raise ValueError(
+                 "`static` cache implementation is not compatible with `attn_implementation==flash_attention_2`; "
+                 "make sure to use `sdpa` in the meantime, and open an issue at https://github.com/huggingface/transformers"
+             )
+
+         output_attentions = False
+
+         bsz, q_len, _ = hidden_states.size()
+
+         query_states = self.q_proj(hidden_states)
+         key_states = self.k_proj(hidden_states)
+         value_states = self.v_proj(hidden_states)
+
+         # Flash attention requires the input to have the shape
+         # batch_size x seq_length x num_heads x head_dim
+         # therefore we just need to keep the original shape
+         query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+         key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+         value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+         if position_embeddings is None:
+             logger.warning_once(
+                 "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
+                 "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
+                 "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
+                 "removed and `position_embeddings` will be mandatory."
+             )
+             cos, sin = self.rotary_emb(value_states, position_ids)
+         else:
+             cos, sin = position_embeddings
+
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+         if past_key_value is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         # TODO: These transposes are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
+         # to be able to avoid many of these transpose/reshape/view.
+         query_states = query_states.transpose(1, 2)
+         key_states = key_states.transpose(1, 2)
+         value_states = value_states.transpose(1, 2)
+
+         dropout_rate = self.attention_dropout if self.training else 0.0
+
+         # In PEFT, we usually cast the layer norms in float32 for training stability reasons,
+         # therefore the input hidden states get silently cast to float32. Hence, we need to
+         # cast them back to the correct dtype just to be sure everything works as expected.
+         # This might slow down training & inference, so it is recommended to not cast the LayerNorms
+         # in fp32. (LlamaRMSNorm handles it correctly)
+
+         input_dtype = query_states.dtype
+         if input_dtype == torch.float32:
+             if torch.is_autocast_enabled():
+                 target_dtype = torch.get_autocast_gpu_dtype()
+             # Handle the case where the model is quantized
+             elif hasattr(self.config, "_pre_quantization_dtype"):
+                 target_dtype = self.config._pre_quantization_dtype
+             else:
+                 target_dtype = self.q_proj.weight.dtype
+
+             logger.warning_once(
+                 f"The input hidden states seem to be silently cast to float32; this might be related to"
+                 f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in"
+                 f" {target_dtype}."
+             )
+
+             query_states = query_states.to(target_dtype)
+             key_states = key_states.to(target_dtype)
+             value_states = value_states.to(target_dtype)
+
+         attn_output = _flash_attention_forward(
+             query_states,
+             key_states,
+             value_states,
+             attention_mask,
+             q_len,
+             dropout=dropout_rate,
+             sliding_window=getattr(self, "sliding_window", None),
+             use_top_left_mask=self._flash_attn_uses_top_left_mask,
+             is_causal=self.is_causal,
+         )
+
+         attn_output = attn_output.reshape(bsz, q_len, -1).contiguous()
+         attn_output = self.o_proj(attn_output)
+
+         if not output_attentions:
+             attn_weights = None
+
+         return attn_output, attn_weights, past_key_value
+
+
+ class LlamaSdpaAttention(LlamaAttention):
+     """
+     Llama attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from
+     `LlamaAttention` as the weights of the module stay untouched. The only changes are on the forward pass, to adapt
+     to the SDPA API.
+     """
+
+     # Adapted from LlamaAttention.forward
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         past_key_value: Optional[Cache] = None,
+         output_attentions: bool = False,
+         use_cache: bool = False,
+         cache_position: Optional[torch.LongTensor] = None,
+         position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,  # will become mandatory in v4.45
+         **kwargs,
+     ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
+         if output_attentions:
+             # TODO: Improve this warning with e.g. `model.config.attn_implementation = "manual"` once this is implemented.
+             logger.warning_once(
+                 "LlamaModel is using LlamaSdpaAttention, but `torch.nn.functional.scaled_dot_product_attention` does not support `output_attentions=True`. Falling back to the manual attention implementation, "
+                 'but specifying the manual implementation will be required from Transformers version v5.0.0 onwards. This warning can be removed using the argument `attn_implementation="eager"` when loading the model.'
+             )
+             return super().forward(
+                 hidden_states=hidden_states,
+                 attention_mask=attention_mask,
+                 position_ids=position_ids,
+                 past_key_value=past_key_value,
+                 output_attentions=output_attentions,
+                 use_cache=use_cache,
+                 cache_position=cache_position,
+                 position_embeddings=position_embeddings,
+             )
+
+         bsz, q_len, _ = hidden_states.size()
+
+         query_states = self.q_proj(hidden_states)
+         key_states = self.k_proj(hidden_states)
+         value_states = self.v_proj(hidden_states)
+
+         query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
+         key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+         value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
+
+         if position_embeddings is None:
+             logger.warning_once(
+                 "The attention layers in this model are transitioning from computing the RoPE embeddings internally "
+                 "through `position_ids` (2D tensor with the indexes of the tokens), to using externally computed "
+                 "`position_embeddings` (Tuple of tensors, containing cos and sin). In v4.45 `position_ids` will be "
+                 "removed and `position_embeddings` will be mandatory."
+             )
+             cos, sin = self.rotary_emb(value_states, position_ids)
+         else:
+             cos, sin = position_embeddings
+
+         query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin)
+
+         if past_key_value is not None:
+             # sin and cos are specific to RoPE models; cache_position needed for the static cache
+             cache_kwargs = {"sin": sin, "cos": cos, "cache_position": cache_position}
+             key_states, value_states = past_key_value.update(key_states, value_states, self.layer_idx, cache_kwargs)
+
+         key_states = repeat_kv(key_states, self.num_key_value_groups)
+         value_states = repeat_kv(value_states, self.num_key_value_groups)
+
+         causal_mask = attention_mask
+         if attention_mask is not None:
+             causal_mask = causal_mask[:, :, :, : key_states.shape[-2]]
+
+         # SDPA with memory-efficient backend is currently (torch==2.1.2) bugged with non-contiguous inputs with custom attn_mask,
+         # Reference: https://github.com/pytorch/pytorch/issues/112577.
+         if query_states.device.type == "cuda" and causal_mask is not None:
+             query_states = query_states.contiguous()
+             key_states = key_states.contiguous()
+             value_states = value_states.contiguous()
+
+         # We dispatch to SDPA's Flash Attention or Efficient kernels via this `is_causal` if statement instead of an inline conditional assignment
+         # in SDPA to support both torch.compile's dynamic shapes and full graph options. An inline conditional prevents dynamic shapes from compiling.
+         is_causal = True if causal_mask is None and q_len > 1 else False
+
+         attn_output = torch.nn.functional.scaled_dot_product_attention(
+             query_states,
+             key_states,
+             value_states,
+             attn_mask=causal_mask,
+             dropout_p=self.attention_dropout if self.training else 0.0,
+             is_causal=is_causal,
+         )
+
+         attn_output = attn_output.transpose(1, 2).contiguous()
+         attn_output = attn_output.view(bsz, q_len, -1)
+
+         attn_output = self.o_proj(attn_output)
+
+         return attn_output, None, past_key_value
+
+
+ LLAMA_ATTENTION_CLASSES = {
+     "eager": LlamaAttention,
+     "flash_attention_2": LlamaFlashAttention2,
+     "sdpa": LlamaSdpaAttention,
631
+ }
632
+
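# Annotation (illustrative sketch, not part of the committed file): the `LLAMA_ATTENTION_CLASSES`
# mapping above selects an attention backend from the `config._attn_implementation` string, and
# `LlamaDecoderLayer.__init__` below instantiates one backend per layer, e.g.
#
#     config._attn_implementation = "sdpa"   # or "eager" / "flash_attention_2"
#     attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=0)
#
# Which string ends up in the config depends on how the model is loaded (for example,
# `from_pretrained(..., attn_implementation="flash_attention_2")` in recent transformers
# releases) and on whether flash-attn is available.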
633
+
634
+ class LlamaDecoderLayer(nn.Module):
635
+ def __init__(self, config: LlamaConfig, layer_idx: int):
636
+ super().__init__()
637
+ self.hidden_size = config.hidden_size
638
+
639
+ self.self_attn = LLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
640
+ self.mlp = LlamaMLP(config)
641
+ self.input_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
642
+ self.post_attention_layernorm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
643
+
644
+ def forward(
645
+ self,
646
+ hidden_states: torch.Tensor,
647
+ attention_mask: Optional[torch.Tensor] = None,
648
+ position_ids: Optional[torch.LongTensor] = None,
649
+ past_key_value: Optional[Cache] = None,
650
+ output_attentions: Optional[bool] = False,
651
+ use_cache: Optional[bool] = False,
652
+ cache_position: Optional[torch.LongTensor] = None,
653
+ position_embeddings: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, # will become mandatory in v4.45
654
+ **kwargs,
655
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
656
+ """
657
+ Args:
658
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
659
+ attention_mask (`torch.FloatTensor`, *optional*):
660
+ attention mask of size `(batch_size, sequence_length)` if flash attention is used or `(batch_size, 1,
661
+ query_sequence_length, key_sequence_length)` if default attention is used.
662
+ output_attentions (`bool`, *optional*):
663
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
664
+ returned tensors for more detail.
665
+ use_cache (`bool`, *optional*):
666
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
667
+ (see `past_key_values`).
668
+ past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states
669
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
670
+ Indices depicting the position of the input sequence tokens in the sequence
671
+ position_embeddings (`Tuple[torch.FloatTensor, torch.FloatTensor]`, *optional*):
672
+ Tuple containing the cosine and sine positional embeddings of shape `(batch_size, seq_len, head_dim)`,
673
+ with `head_dim` being the embedding dimension of each attention head.
674
+ kwargs (`dict`, *optional*):
675
+ Arbitrary kwargs to be ignored, used for FSDP and other methods that inject code
676
+ into the model.
677
+ """
678
+ residual = hidden_states
679
+ # print(hidden_states.float().sum())
680
+ hidden_states = self.input_layernorm(hidden_states)
681
+ # print(hidden_states.float().sum())
682
+
683
+ # Self Attention
684
+ hidden_states, self_attn_weights, present_key_value = self.self_attn(
685
+ hidden_states=hidden_states,
686
+ attention_mask=attention_mask,
687
+ position_ids=position_ids,
688
+ past_key_value=past_key_value,
689
+ output_attentions=output_attentions,
690
+ use_cache=use_cache,
691
+ cache_position=cache_position,
692
+ position_embeddings=position_embeddings,
693
+ **kwargs,
694
+ )
695
+ hidden_states = residual + hidden_states
696
+
697
+ # Fully Connected
698
+ residual = hidden_states
699
+ hidden_states = self.post_attention_layernorm(hidden_states)
700
+ hidden_states = self.mlp(hidden_states)
701
+ hidden_states = residual + hidden_states
702
+
703
+ outputs = (hidden_states,)
704
+
705
+ if output_attentions:
706
+ outputs += (self_attn_weights,)
707
+
708
+ if use_cache:
709
+ outputs += (present_key_value,)
710
+
711
+ return outputs
712
+
713
+
714
+ LLAMA_START_DOCSTRING = r"""
715
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
716
+ library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
717
+ etc.)
718
+
719
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
720
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
721
+ and behavior.
722
+
723
+ Parameters:
724
+ config ([`LlamaConfig`]):
725
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
726
+ load the weights associated with the model, only the configuration. Check out the
727
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
728
+ """
729
+
730
+
731
+ @add_start_docstrings(
732
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
733
+ LLAMA_START_DOCSTRING,
734
+ )
735
+ class LlamaPreTrainedModel(PreTrainedModel):
736
+ config_class = LlamaConfig
737
+ base_model_prefix = "model"
738
+ supports_gradient_checkpointing = True
739
+ _no_split_modules = ["LlamaDecoderLayer"]
740
+ _skip_keys_device_placement = ["past_key_values"]
741
+ _supports_flash_attn_2 = True
742
+ _supports_sdpa = True
743
+ _supports_cache_class = True
744
+ _supports_quantized_cache = True
745
+ _supports_static_cache = True
746
+
747
+ def _init_weights(self, module):
748
+ std = self.config.initializer_range
749
+ if isinstance(module, nn.Linear):
750
+ module.weight.data.normal_(mean=0.0, std=std)
751
+ if module.bias is not None:
752
+ module.bias.data.zero_()
753
+ elif isinstance(module, nn.Embedding):
754
+ module.weight.data.normal_(mean=0.0, std=std)
755
+ if module.padding_idx is not None:
756
+ module.weight.data[module.padding_idx].zero_()
757
+
758
+
759
+ LLAMA_INPUTS_DOCSTRING = r"""
760
+ Args:
761
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
762
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
763
+ it.
764
+
765
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
766
+ [`PreTrainedTokenizer.__call__`] for details.
767
+
768
+ [What are input IDs?](../glossary#input-ids)
769
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
770
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
771
+
772
+ - 1 for tokens that are **not masked**,
773
+ - 0 for tokens that are **masked**.
774
+
775
+ [What are attention masks?](../glossary#attention-mask)
776
+
777
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
778
+ [`PreTrainedTokenizer.__call__`] for details.
779
+
780
+ If `past_key_values` is used, optionally only the last `input_ids` have to be input (see
781
+ `past_key_values`).
782
+
783
+ If you want to change padding behavior, you should read [`modeling_opt._prepare_decoder_attention_mask`]
784
+ and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more
785
+ information on the default strategy.
786
+
787
+ - 1 indicates the head is **not masked**,
788
+ - 0 indicates the head is **masked**.
789
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
790
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
791
+ config.n_positions - 1]`.
792
+
793
+ [What are position IDs?](../glossary#position-ids)
794
+ past_key_values (`Cache` or `tuple(tuple(torch.FloatTensor))`, *optional*):
795
+ Pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
796
+ blocks) that can be used to speed up sequential decoding. This typically consists of the `past_key_values`
797
+ returned by the model at a previous stage of decoding, when `use_cache=True` or `config.use_cache=True`.
798
+
799
+ Two formats are allowed:
800
+ - a [`~cache_utils.Cache`] instance;
801
+ - Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of
802
+ shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`). This is also known as the legacy
803
+ cache format.
804
+
805
+ The model will output the same cache format that is fed as input. If no `past_key_values` are passed, the
806
+ legacy cache format will be returned.
807
+
808
+ If `past_key_values` are used, the user can optionally input only the last `input_ids` (those that don't
809
+ have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `input_ids`
810
+ of shape `(batch_size, sequence_length)`.
811
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
812
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
813
+ is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
814
+ model's internal embedding lookup matrix.
815
+ use_cache (`bool`, *optional*):
816
+ If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
817
+ `past_key_values`).
818
+ output_attentions (`bool`, *optional*):
819
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
820
+ tensors for more detail.
821
+ output_hidden_states (`bool`, *optional*):
822
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
823
+ more detail.
824
+ return_dict (`bool`, *optional*):
825
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
826
+ cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
827
+ Indices depicting the position of the input sequence tokens in the sequence. Contrarily to `position_ids`,
828
+ this tensor is not affected by padding. It is used to update the cache in the correct position and to infer
829
+ the complete sequence length.
830
+ """
831
+
832
+
833
+ @add_start_docstrings(
834
+ "The bare LLaMA Model outputting raw hidden-states without any specific head on top.",
835
+ LLAMA_START_DOCSTRING,
836
+ )
837
+ class LlamaModel(LlamaPreTrainedModel):
838
+ """
839
+ Transformer decoder consisting of *config.num_hidden_layers* layers. Each layer is a [`LlamaDecoderLayer`]
840
+
841
+ Args:
842
+ config: LlamaConfig
843
+ """
844
+
845
+ def __init__(self, config: LlamaConfig):
846
+ super().__init__(config)
847
+ self.padding_idx = config.pad_token_id
848
+ self.vocab_size = config.vocab_size
849
+
850
+ self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
851
+ self.layers = nn.ModuleList(
852
+ [LlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
853
+ )
854
+ self.norm = LlamaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
855
+ self.rotary_emb = LlamaRotaryEmbedding(config=config)
856
+ self.gradient_checkpointing = False
857
+
858
+ # Initialize weights and apply final processing
859
+ self.post_init()
860
+
861
+ def get_input_embeddings(self):
862
+ return self.embed_tokens
863
+
864
+ def set_input_embeddings(self, value):
865
+ self.embed_tokens = value
866
+
867
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
868
+ def forward(
869
+ self,
870
+ input_ids: torch.LongTensor = None,
871
+ attention_mask: Optional[torch.Tensor] = None,
872
+ position_ids: Optional[torch.LongTensor] = None,
873
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
874
+ inputs_embeds: Optional[torch.FloatTensor] = None,
875
+ use_cache: Optional[bool] = None,
876
+ output_attentions: Optional[bool] = None,
877
+ output_hidden_states: Optional[bool] = None,
878
+ return_dict: Optional[bool] = None,
879
+ cache_position: Optional[torch.LongTensor] = None,
880
+ ) -> Union[Tuple, BaseModelOutputWithPast]:
881
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
882
+ output_hidden_states = (
883
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
884
+ )
885
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
886
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
887
+
888
+ if (input_ids is None) ^ (inputs_embeds is not None):
889
+ raise ValueError(
890
+ "You cannot specify both input_ids and inputs_embeds at the same time, and must specify either one"
891
+ )
892
+
893
+ if self.gradient_checkpointing and self.training and use_cache:
894
+ logger.warning_once(
895
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
896
+ )
897
+ use_cache = False
898
+
899
+ if inputs_embeds is None:
900
+ inputs_embeds = self.embed_tokens(input_ids)
901
+
902
+ return_legacy_cache = False
903
+ if (
904
+ use_cache and not isinstance(past_key_values, Cache) and not self.training
905
+ ): # kept for BC (non `Cache` `past_key_values` inputs)
906
+ return_legacy_cache = True
907
+ past_key_values = DynamicCache.from_legacy_cache(past_key_values)
908
+ logger.warning_once(
909
+ "We detected that you are passing `past_key_values` as a tuple and this is deprecated and will be removed in v4.43. "
910
+ "Please use an appropriate `Cache` class (https://huggingface.co/docs/transformers/v4.41.3/en/internal/generation_utils#transformers.Cache)"
911
+ )
912
+
913
+ if cache_position is None:
914
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
915
+ cache_position = torch.arange(
916
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
917
+ )
918
+ if position_ids is None:
919
+ position_ids = cache_position.unsqueeze(0)
920
+
921
+ causal_mask = self._update_causal_mask(
922
+ attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
923
+ )
924
+ hidden_states = inputs_embeds
925
+
926
+ # create position embeddings to be shared across the decoder layers
927
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
928
+
929
+ # decoder layers
930
+ all_hidden_states = () if output_hidden_states else None
931
+ all_self_attns = () if output_attentions else None
932
+ next_decoder_cache = None
933
+
934
+ for decoder_layer in self.layers:
935
+ if output_hidden_states:
936
+ all_hidden_states += (hidden_states,)
937
+
938
+ if self.gradient_checkpointing and self.training:
939
+ layer_outputs = self._gradient_checkpointing_func(
940
+ decoder_layer.__call__,
941
+ hidden_states,
942
+ causal_mask,
943
+ position_ids,
944
+ past_key_values,
945
+ output_attentions,
946
+ use_cache,
947
+ cache_position,
948
+ position_embeddings,
949
+ )
950
+ else:
951
+ layer_outputs = decoder_layer(
952
+ hidden_states,
953
+ attention_mask=causal_mask,
954
+ position_ids=position_ids,
955
+ past_key_value=past_key_values,
956
+ output_attentions=output_attentions,
957
+ use_cache=use_cache,
958
+ cache_position=cache_position,
959
+ position_embeddings=position_embeddings,
960
+ )
961
+
962
+ hidden_states = layer_outputs[0]
963
+
964
+ if use_cache:
965
+ next_decoder_cache = layer_outputs[2 if output_attentions else 1]
966
+
967
+ if output_attentions:
968
+ all_self_attns += (layer_outputs[1],)
969
+
970
+ hidden_states = self.norm(hidden_states)
971
+
972
+ # add hidden states from the last decoder layer
973
+ if output_hidden_states:
974
+ all_hidden_states += (hidden_states,)
975
+
976
+ next_cache = next_decoder_cache if use_cache else None
977
+ if return_legacy_cache:
978
+ next_cache = next_cache.to_legacy_cache()
979
+
980
+ if not return_dict:
981
+ return tuple(v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None)
982
+ return BaseModelOutputWithPast(
983
+ last_hidden_state=hidden_states,
984
+ past_key_values=next_cache,
985
+ hidden_states=all_hidden_states,
986
+ attentions=all_self_attns,
987
+ )
988
+
989
+ def _update_causal_mask(
990
+ self,
991
+ attention_mask: torch.Tensor,
992
+ input_tensor: torch.Tensor,
993
+ cache_position: torch.Tensor,
994
+ past_key_values: Cache,
995
+ output_attentions: bool,
996
+ ):
997
+ # TODO: As of torch==2.2.0, the `attention_mask` passed to the model in `generate` is 2D and of dynamic length even when the static
998
+ # KV cache is used. This is an issue for torch.compile which then recaptures cudagraphs at each decode steps due to the dynamic shapes.
999
+ # (`recording cudagraph tree for symint key 13`, etc.), which is VERY slow. A workaround is `@torch.compiler.disable`, but this prevents using
1000
+ # `fullgraph=True`. See more context in https://github.com/huggingface/transformers/pull/29114
1001
+
1002
+ if self.config._attn_implementation == "flash_attention_2":
1003
+ if attention_mask is not None and 0.0 in attention_mask:
1004
+ return attention_mask
1005
+ return None
1006
+
1007
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
1008
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
1009
+ # to infer the attention mask.
1010
+ past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
1011
+ using_static_cache = isinstance(past_key_values, StaticCache)
1012
+
1013
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
1014
+ if self.config._attn_implementation == "sdpa" and not using_static_cache and not output_attentions:
1015
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
1016
+ attention_mask,
1017
+ inputs_embeds=input_tensor,
1018
+ past_key_values_length=past_seen_tokens,
1019
+ is_training=self.training,
1020
+ ):
1021
+ return None
1022
+
1023
+ dtype, device = input_tensor.dtype, input_tensor.device
1024
+ min_dtype = torch.finfo(dtype).min
1025
+ sequence_length = input_tensor.shape[1]
1026
+ if using_static_cache:
1027
+ target_length = past_key_values.get_max_length()
1028
+ else:
1029
+ target_length = (
1030
+ attention_mask.shape[-1]
1031
+ if isinstance(attention_mask, torch.Tensor)
1032
+ else past_seen_tokens + sequence_length + 1
1033
+ )
1034
+
1035
+ if attention_mask is not None and attention_mask.dim() == 4:
1036
+ # in this case we assume that the mask comes already in inverted form and requires no inversion or slicing
1037
+ if attention_mask.max() != 0:
1038
+ raise ValueError("Custom 4D attention mask should be passed in inverted form with max==0")
1039
+ causal_mask = attention_mask
1040
+ else:
1041
+ causal_mask = torch.full(
1042
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=device
1043
+ )
1044
+ if sequence_length != 1:
1045
+ causal_mask = torch.triu(causal_mask, diagonal=1)
1046
+ causal_mask *= torch.arange(target_length, device=device) > cache_position.reshape(-1, 1)
1047
+ causal_mask = causal_mask[None, None, :, :].expand(input_tensor.shape[0], 1, -1, -1)
1048
+ if attention_mask is not None:
1049
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1050
+ mask_length = attention_mask.shape[-1]
1051
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :]
1052
+ padding_mask = padding_mask == 0
1053
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
1054
+ padding_mask, min_dtype
1055
+ )
1056
+ if (
1057
+ self.config._attn_implementation == "sdpa"
1058
+ and attention_mask is not None
1059
+ and attention_mask.device.type == "cuda"
1060
+ and not output_attentions
1061
+ ):
1062
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1063
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1064
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1065
+ causal_mask = AttentionMaskConverter._unmask_unattended(causal_mask, min_dtype)
1066
+
1067
+ return causal_mask
1068
+
1069
+
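# Annotation (illustrative sketch, not part of the committed file): a worked example of the
# mask built by `_update_causal_mask` above. For 2 already-cached tokens and 3 new tokens with
# an all-ones attention_mask of length 5, cache_position = [2, 3, 4] and target_length = 5, so
# the 3x5 float mask lets each new token attend to every earlier position and to itself:
#
#         keys:    0     1     2     3     4
#     query 2:  [  0,    0,    0,  min,  min ]
#     query 3:  [  0,    0,    0,    0,  min ]
#     query 4:  [  0,    0,    0,    0,    0 ]
#
# where "min" stands for torch.finfo(dtype).min; padding positions indicated by a real
# attention_mask (zeros) are additionally filled with the same value.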
1070
+ class LlamaForCausalLM(LlamaPreTrainedModel):
1071
+ _tied_weights_keys = ["lm_head.weight"]
1072
+
1073
+ def __init__(self, config):
1074
+ super().__init__(config)
1075
+ self.model = LlamaModel(config)
1076
+ self.vocab_size = config.vocab_size
1077
+ self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
1078
+
1079
+ # Initialize weights and apply final processing
1080
+ self.post_init()
1081
+
1082
+ def get_input_embeddings(self):
1083
+ return self.model.embed_tokens
1084
+
1085
+ def set_input_embeddings(self, value):
1086
+ self.model.embed_tokens = value
1087
+
1088
+ def get_output_embeddings(self):
1089
+ return self.lm_head
1090
+
1091
+ def set_output_embeddings(self, new_embeddings):
1092
+ self.lm_head = new_embeddings
1093
+
1094
+ def set_decoder(self, decoder):
1095
+ self.model = decoder
1096
+
1097
+ def get_decoder(self):
1098
+ return self.model
1099
+
1100
+ @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)
1101
+ @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)
1102
+ def forward(
1103
+ self,
1104
+ input_ids: torch.LongTensor = None,
1105
+ attention_mask: Optional[torch.Tensor] = None,
1106
+ position_ids: Optional[torch.LongTensor] = None,
1107
+ past_key_values: Optional[Union[Cache, List[torch.FloatTensor]]] = None,
1108
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1109
+ labels: Optional[torch.LongTensor] = None,
1110
+ use_cache: Optional[bool] = None,
1111
+ output_attentions: Optional[bool] = None,
1112
+ output_hidden_states: Optional[bool] = None,
1113
+ return_dict: Optional[bool] = None,
1114
+ cache_position: Optional[torch.LongTensor] = None,
1115
+ ) -> Union[Tuple, CausalLMOutputWithPast]:
1116
+ r"""
1117
+ Args:
1118
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1119
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1120
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1121
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1122
+
1123
+ Returns:
1124
+
1125
+ Example:
1126
+
1127
+ ```python
1128
+ >>> from transformers import AutoTokenizer, LlamaForCausalLM
1129
+
1130
+ >>> model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf")
1131
+ >>> tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
1132
+
1133
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1134
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1135
+
1136
+ >>> # Generate
1137
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1138
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1139
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1140
+ ```"""
1141
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1142
+ output_hidden_states = (
1143
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1144
+ )
1145
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1146
+
1147
+ # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
1148
+ outputs = self.model(
1149
+ input_ids=input_ids,
1150
+ attention_mask=attention_mask,
1151
+ position_ids=position_ids,
1152
+ past_key_values=past_key_values,
1153
+ inputs_embeds=inputs_embeds,
1154
+ use_cache=use_cache,
1155
+ output_attentions=output_attentions,
1156
+ output_hidden_states=output_hidden_states,
1157
+ return_dict=return_dict,
1158
+ cache_position=cache_position,
1159
+ )
1160
+
1161
+ hidden_states = outputs[0]
1162
+ if self.config.pretraining_tp > 1:
1163
+ lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)
1164
+ logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]
1165
+ logits = torch.cat(logits, dim=-1)
1166
+ else:
1167
+ logits = self.lm_head(hidden_states)
1168
+ logits = logits.float()
1169
+
1170
+ loss = None
1171
+ if labels is not None:
1172
+ # Shift so that tokens < n predict n
1173
+ shift_logits = logits[..., :-1, :].contiguous()
1174
+ shift_labels = labels[..., 1:].contiguous()
1175
+ # Flatten the tokens
1176
+ loss_fct = CrossEntropyLoss()
1177
+ shift_logits = shift_logits.view(-1, self.config.vocab_size)
1178
+ shift_labels = shift_labels.view(-1)
1179
+ # Enable model parallelism
1180
+ shift_labels = shift_labels.to(shift_logits.device)
1181
+ loss = loss_fct(shift_logits, shift_labels)
1182
+
1183
+ if not return_dict:
1184
+ output = (logits,) + outputs[1:]
1185
+ return (loss,) + output if loss is not None else output
1186
+
1187
+ return CausalLMOutputWithPast(
1188
+ loss=loss,
1189
+ logits=logits,
1190
+ past_key_values=outputs.past_key_values,
1191
+ hidden_states=outputs.hidden_states,
1192
+ attentions=outputs.attentions,
1193
+ )
1194
+
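# Annotation (illustrative sketch, not part of the committed file): the label shift performed
# in `forward()` above. For input_ids = [t0, t1, t2, t3] and labels = input_ids, the loss uses
#
#     shift_logits = logits[..., :-1, :]   -> predictions made at t0, t1, t2
#     shift_labels = labels[..., 1:]       -> targets            t1, t2, t3
#
# so the logits produced at position i are scored against the token at position i+1, and any
# position labelled -100 is ignored by CrossEntropyLoss (its default ignore_index).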
1195
+ def prepare_inputs_for_generation(
1196
+ self,
1197
+ input_ids,
1198
+ past_key_values=None,
1199
+ attention_mask=None,
1200
+ inputs_embeds=None,
1201
+ cache_position=None,
1202
+ position_ids=None,
1203
+ use_cache=True,
1204
+ **kwargs,
1205
+ ):
1206
+ # If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
1207
+ # Exception 1: when passing input_embeds, input_ids may be missing entries
1208
+ # Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
1209
+ if past_key_values is not None:
1210
+ if inputs_embeds is not None: # Exception 1
1211
+ input_ids = input_ids[:, -cache_position.shape[0] :]
1212
+ elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
1213
+ input_ids = input_ids[:, cache_position]
1214
+
1215
+ if attention_mask is not None and position_ids is None:
1216
+ # create position_ids on the fly for batch generation
1217
+ position_ids = attention_mask.long().cumsum(-1) - 1
1218
+ position_ids.masked_fill_(attention_mask == 0, 1)
1219
+ if past_key_values:
1220
+ position_ids = position_ids[:, -input_ids.shape[1] :]
1221
+
1222
+ # if `inputs_embeds` are passed, we only want to use them in the 1st generation step
1223
+ if inputs_embeds is not None and cache_position[0] == 0:
1224
+ model_inputs = {"inputs_embeds": inputs_embeds}
1225
+ else:
1226
+ model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases
1227
+
1228
+ model_inputs.update(
1229
+ {
1230
+ "position_ids": position_ids,
1231
+ "cache_position": cache_position,
1232
+ "past_key_values": past_key_values,
1233
+ "use_cache": use_cache,
1234
+ "attention_mask": attention_mask,
1235
+ }
1236
+ )
1237
+ return model_inputs
1238
+
1239
+ @torch.inference_mode()
1240
+ def chat(self, tokenizer, query: str, history: List[Dict] = None, role: str = "user",
1241
+ max_length: int = 131072, num_beams=1, do_sample=True, top_p=0.7, temperature=0.95,
1242
+ **kwargs):
1243
+ if history is None:
1244
+ history = []
1245
+ gen_kwargs = {"max_length": max_length, "num_beams": num_beams, "do_sample": do_sample, "top_p": top_p,
1246
+ "temperature": temperature, **kwargs}
1247
+ inputs = tokenizer.build_chat_input(query, history=history, role=role)
1248
+ del inputs['token_type_ids']
1249
+ # print(inputs)
1250
+ inputs = inputs.to(self.device)
1251
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command("<|user|>"),
1252
+ tokenizer.get_command("<|observation|>")]
1253
+ outputs = self.generate(**inputs, **gen_kwargs, eos_token_id=eos_token_id)
1254
+ outputs = outputs.tolist()[0][len(inputs["input_ids"][0]):-1]
1255
+ response = tokenizer.decode(outputs).strip()
1256
+ history.append({"role": role, "content": query})
1257
+ return response, history
1258
+
1259
+ def query_longcite(self, context, query, tokenizer, max_input_length=128000, max_new_tokens=1024, temperature=0.95):
1260
+
1261
+ def text_split_by_punctuation(original_text, return_dict=False):
1262
+ # text = re.sub(r'([a-z])\.([A-Z])', r'\1. \2', original_text) # separate period without space
1263
+ text = original_text
1264
+ custom_sent_tokenizer = PunktSentenceTokenizer(text)
1265
+ punctuations = r"([。;!?])" # For Chinese support
1266
+
1267
+ separated = custom_sent_tokenizer.tokenize(text)
1268
+ separated = sum([re.split(punctuations, s) for s in separated], [])
1269
+ # Put the punctuations back to the sentence
1270
+ for i in range(1, len(separated)):
1271
+ if re.match(punctuations, separated[i]):
1272
+ separated[i-1] += separated[i]
1273
+ separated[i] = ''
1274
+
1275
+ separated = [s for s in separated if s != ""]
1276
+ if len(separated) == 1:
1277
+ separated = original_text.split('\n\n')
1278
+ separated = [s.strip() for s in separated if s.strip() != ""]
1279
+ if not return_dict:
1280
+ return separated
1281
+ else:
1282
+ pos = 0
1283
+ res = []
1284
+ for i, sent in enumerate(separated):
1285
+ st = original_text.find(sent, pos)
1286
+ assert st != -1, sent
1287
+ ed = st + len(sent)
1288
+ res.append(
1289
+ {
1290
+ 'c_idx': i,
1291
+ 'content': sent,
1292
+ 'start_idx': st,
1293
+ 'end_idx': ed,
1294
+ }
1295
+ )
1296
+ pos = ed
1297
+ return res
1298
+
1299
+ def get_prompt(context, question):
1300
+ sents = text_split_by_punctuation(context, return_dict=True)
1301
+ splited_context = ""
1302
+ for i, s in enumerate(sents):
1303
+ st, ed = s['start_idx'], s['end_idx']
1304
+ assert s['content'] == context[st:ed], s
1305
+ ed = sents[i+1]['start_idx'] if i < len(sents)-1 else len(context)
1306
+ sents[i] = {
1307
+ 'content': context[st:ed],
1308
+ 'start': st,
1309
+ 'end': ed,
1310
+ 'c_idx': s['c_idx'],
1311
+ }
1312
+ splited_context += f"<C{i}>"+context[st:ed]
1313
+ prompt = '''Please answer the user's question based on the following document. When a sentence S in your response uses information from some chunks in the document (i.e., <C{s1}>-<C_{e1}>, <C{s2}>-<C{e2}>, ...), please append these chunk numbers to S in the format "<statement>{S}<cite>[{s1}-{e1}][{s2}-{e2}]...</cite></statement>". You must answer in the same language as the user's question.\n\n[Document Start]\n%s\n[Document End]\n\n%s''' % (splited_context, question)
1314
+ return prompt, sents, splited_context
1315
+
1316
+ def get_citations(statement, sents):
1317
+ c_texts = re.findall(r'<cite>(.*?)</cite>', statement, re.DOTALL)
1318
+ spans = sum([re.findall(r"\[([0-9]+\-[0-9]+)\]", c_text, re.DOTALL) for c_text in c_texts], [])
1319
+ statement = re.sub(r'<cite>(.*?)</cite>', '', statement, flags=re.DOTALL)
1320
+ merged_citations = []
1321
+ for i, s in enumerate(spans):
1322
+ try:
1323
+ st, ed = [int(x) for x in s.split('-')]
1324
+ if st > len(sents) - 1 or ed < st:
1325
+ continue
1326
+ st, ed = max(0, st), min(ed, len(sents)-1)
1327
+ assert st <= ed, str(c_texts) + '\t' + str(len(sents))
1328
+ if len(merged_citations) > 0 and st == merged_citations[-1]['end_sentence_idx'] + 1:
1329
+ merged_citations[-1].update({
1330
+ "end_sentence_idx": ed,
1331
+ 'end_char_idx': sents[ed]['end'],
1332
+ 'cite': ''.join([x['content'] for x in sents[merged_citations[-1]['start_sentence_idx']:ed+1]]),
1333
+ })
1334
+ else:
1335
+ merged_citations.append({
1336
+ "start_sentence_idx": st,
1337
+ "end_sentence_idx": ed,
1338
+ "start_char_idx": sents[st]['start'],
1339
+ 'end_char_idx': sents[ed]['end'],
1340
+ 'cite': ''.join([x['content'] for x in sents[st:ed+1]]),
1341
+ })
1342
+ except:
1343
+ print(c_texts, len(sents), statement)
1344
+ raise
1345
+ return statement, merged_citations[:3]
1346
+
1347
+ def postprocess(answer, sents, splited_context):
1348
+ res = []
1349
+ pos = 0
1350
+ new_answer = ""
1351
+ while True:
1352
+ st = answer.find("<statement>", pos)
1353
+ if st == -1:
1354
+ st = len(answer)
1355
+ ed = answer.find("</statement>", st)
1356
+ statement = answer[pos:st]
1357
+ if len(statement.strip()) > 5:
1358
+ res.append({
1359
+ "statement": statement,
1360
+ "citation": []
1361
+ })
1362
+ new_answer += f"<statement>{statement}<cite></cite></statement>"
1363
+ else:
1364
+ res.append({
1365
+ "statement": statement,
1366
+ "citation": None,
1367
+ })
1368
+ new_answer += statement
1369
+
1370
+ if ed == -1:
1371
+ break
1372
+
1373
+ statement = answer[st+len("<statement>"):ed]
1374
+ if len(statement.strip()) > 0:
1375
+ statement, citations = get_citations(statement, sents)
1376
+ res.append({
1377
+ "statement": statement,
1378
+ "citation": citations
1379
+ })
1380
+ c_str = ''.join(['[{}-{}]'.format(c['start_sentence_idx'], c['end_sentence_idx']) for c in citations])
1381
+ new_answer += f"<statement>{statement}<cite>{c_str}</cite></statement>"
1382
+ else:
1383
+ res.append({
1384
+ "statement": statement,
1385
+ "citation": None,
1386
+ })
1387
+ new_answer += statement
1388
+ pos = ed + len("</statement>")
1389
+ return {
1390
+ "answer": new_answer.strip(),
1391
+ "statements_with_citations": [x for x in res if x['citation'] is not None],
1392
+ "splited_context": splited_context.strip(),
1393
+ "all_statements": res,
1394
+ }
1395
+
1396
+ def truncate_from_middle(prompt, max_input_length=None, tokenizer=None):
1397
+ if max_input_length is None:
1398
+ return prompt
1399
+ else:
1400
+ assert tokenizer is not None
1401
+ tokenized_prompt = tokenizer.encode(prompt, add_special_tokens=False)
1402
+ if len(tokenized_prompt) > max_input_length:
1403
+ half = int(max_input_length/2)
1404
+ prompt = tokenizer.decode(tokenized_prompt[:half], skip_special_tokens=True)+tokenizer.decode(tokenized_prompt[-half:], skip_special_tokens=True)
1405
+ return prompt
1406
+
1407
+ prompt, sents, splited_context = get_prompt(context, query)
1408
+ prompt = truncate_from_middle(prompt, max_input_length, tokenizer)
1409
+ output, _ = self.chat(tokenizer, prompt, history=[], max_new_tokens=max_new_tokens, temperature=temperature)
1410
+ result = postprocess(output, sents, splited_context)
1411
+ return result
1412
+
1413
+
1414
+
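A minimal usage sketch for the modeling code above (assuming the repo id THUDM/LongCite-llama3.1-8b used in vllm_inference.py below, enough GPU memory, and a transformers version compatible with the `auto_map` entries in config.json; `device_map` and `torch_dtype` are standard transformers arguments, not defined in this repo):

import json
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

repo = "THUDM/LongCite-llama3.1-8b"
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto"
)

context = "..."  # the long document to be cited
query = "..."    # the user question
result = model.query_longcite(
    context, query, tokenizer=tokenizer, max_input_length=128000, max_new_tokens=1024
)
print(result["answer"])  # answer with <statement>/<cite> markup
print(json.dumps(result["statements_with_citations"], indent=2, ensure_ascii=False))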
tiktoken_tokenizer.py ADDED
@@ -0,0 +1,163 @@
1
+ import regex as re
2
+ import base64
3
+ import tiktoken
4
+ import os
5
+ import json
6
+ from transformers import PreTrainedTokenizer
7
+
8
+ class BaseTokenizer(PreTrainedTokenizer):
9
+ """Abstract class for tokenizer."""
10
+
11
+ def __init__(self, **kwargs):
12
+ super().__init__()
13
+
14
+ @property
15
+ def add_prefix_space(self):
16
+ return False
17
+
18
+ @property
19
+ def vocab_size(self):
20
+ raise NotImplementedError
21
+
22
+ def tokenize(self, text):
23
+ raise NotImplementedError
24
+
25
+ def detokenize(self, token_ids, ignore_special_tokens=True):
26
+ raise NotImplementedError
27
+
28
+ def build_single_message(self, role, metadata, message):
29
+ assert role in ["system", "user", "assistant", "observation"], role
30
+ role_tokens = [self.get_command(f"<|{role}|>")] + self.tokenizer.encode(f"{metadata}\n")
31
+ message_tokens = self.tokenizer.encode(message, disallowed_special=())
32
+ tokens = role_tokens + message_tokens
33
+ return tokens
34
+
35
+ def build_chat_input(self, query, history=None, role="user", metadata=""):
36
+ if history is None:
37
+ history = []
38
+ input_ids = []
39
+ for item in history:
40
+ content = item["content"]
41
+ if item["role"] == "system" and "tools" in item:
42
+ content = content + "\n" + json.dumps(item["tools"], indent=4, ensure_ascii=False)
43
+ input_ids.extend(self.build_single_message(item["role"], item.get("metadata", ""), content))
44
+ input_ids.extend(self.build_single_message(role, metadata, query))
45
+ input_ids.extend([self.get_command("<|assistant|>")])
46
+ return self.batch_encode_plus([input_ids], return_tensors="pt", is_split_into_words=True)
47
+
48
+ @property
49
+ def eos_id(self):
50
+ raise NotImplementedError
51
+
52
+ def get_command(self, token):
53
+ return NotImplemented
54
+
55
+ class TikTokenizer(BaseTokenizer):
56
+ @staticmethod
57
+ def from_pretrained(path, *inputs, **kwargs):
58
+ return TikTokenizer(vocab_file=os.path.join(path, "tokenizer.tiktoken"))
59
+
60
+ def __init__(self, vocab_file=None):
61
+ pat_str = "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+"
62
+ self.pat_str = re.compile(pat_str)
63
+
64
+ self.b64_vocab = {}
65
+ if vocab_file is not None:
66
+ mergeable_ranks = {}
67
+ with open(vocab_file) as f:
68
+ for line in f:
69
+ token, rank = line.strip().split()
70
+ rank = int(rank)
71
+ token = base64.b64decode(token)
72
+ mergeable_ranks[token] = rank
73
+ self.b64_vocab['%s' % token] = rank
74
+
75
+ self.special_tokens = ["<|endoftext|>", "[MASK]", "[gMASK]", "[sMASK]", "<sop>", "<eop>", "<|system|>",
76
+ "<|user|>", "<|assistant|>", "<|observation|>"]
77
+ self.special_tokens = {
78
+ token: idx for idx, token in enumerate(self.special_tokens, start=len(mergeable_ranks))
79
+ }
80
+ self.special_token_ids = {idx: token for token, idx in self.special_tokens.items()}
81
+
82
+ self.tokenizer = tiktoken.Encoding(
83
+ name="my_tokenizer",
84
+ pat_str=pat_str,
85
+ mergeable_ranks=mergeable_ranks,
86
+ special_tokens=self.special_tokens
87
+ )
88
+ self.decoder = {rank: token for token, rank in mergeable_ranks.items()}
89
+ self.n_words = len(self.decoder) + len(self.special_tokens)
90
+ super().__init__()
91
+
92
+ @property
93
+ def add_prefix_space(self):
94
+ return False
95
+
96
+ def tokenize(self, text, add_special_tokens=True):
97
+ ids = self.encode(text, add_special_tokens=add_special_tokens)
98
+ return [self.convert_id_to_token(_id) for _id in ids]
99
+
100
+ def detokenize(self, ids, ignore_special_tokens=True):
101
+ if ignore_special_tokens:
102
+ ids = [idx for idx in ids if idx not in self.special_token_ids]
103
+ return self.tokenizer.decode(ids)
104
+
105
+ def encode(self, text, add_special_tokens=True):
106
+ ids = self.tokenizer.encode(text, disallowed_special=(), allowed_special="all")
107
+ if add_special_tokens:
108
+ ids = [self.special_tokens["[gMASK]"], self.special_tokens["<sop>"]] + ids
109
+ return ids
110
+
111
+ def decode(self, ids, skip_special_tokens=False, clean_up_tokenization_spaces=False):
112
+ if type(ids) is int:
113
+ ids = [ids]
114
+ return self.detokenize(ids, ignore_special_tokens=skip_special_tokens)
115
+
116
+ def encode_pieces(self, text):
117
+ ids = self.tokenizer.encode(text, disallowed_special=())
118
+ return list(map(lambda x: self.decoder[x].decode('utf-8', errors='replace'), ids))
119
+
120
+ @property
121
+ def vocab_size(self):
122
+ return self.n_words
123
+
124
+ @property
125
+ def eos_token_id(self):
126
+ return self.special_tokens["<|endoftext|>"]
127
+
128
+ def convert_token_to_id(self, token):
129
+ """ Converts a token (str) in an id using the vocab. """
130
+ if token in self.special_tokens:
131
+ return self.special_tokens[token]
132
+ # assert type(token) == str, "type of token (%s) is %s" % (token, type(token))
133
+ # ids = self.tokenizer.encode(token, disallowed_special=())
134
+ if token in self.b64_vocab:
135
+ return self.b64_vocab[token]
136
+ # if len(ids) == 1:
137
+ # return ids[0]
138
+ else:
139
+ raise RuntimeError(f"{token} is not a single token")
140
+
141
+ def _convert_token_to_id(self, token):
142
+ return self.convert_token_to_id(token)
143
+
144
+ def convert_id_to_token(self, index):
145
+ if index in self.special_token_ids:
146
+ return self.special_token_ids[index]
147
+ return '%s' % self.decoder[index]
148
+ # try:
149
+ # return self.decoder[index].decode('utf-8')
150
+ # except Exception as e:
151
+ # print("Exception: %s for (%d)%s" % (e, index, self.decoder[index]))
152
+ # return ""
153
+ #return self.decoder[index].detokenize('utf-8', errors='replace')
154
+
155
+ def _convert_id_to_token(self, index):
156
+ return self.convert_id_to_token(index)
157
+
158
+ def get_command(self, token):
159
+ return self.special_tokens[token]
160
+
161
+ def get_vocab(self):
162
+ vocab = {self._convert_id_to_token(i): i for i in range(self.vocab_size)}
163
+ return vocab
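A small round-trip sketch for the TikTokenizer above (assumes tokenizer.tiktoken sits in the same directory, as it does in this repo):

from tiktoken_tokenizer import TikTokenizer

tok = TikTokenizer.from_pretrained(".")  # reads ./tokenizer.tiktoken
ids = tok.encode("Hello world", add_special_tokens=True)  # prepends [gMASK] <sop>
print(tok.decode(ids, skip_special_tokens=True))           # -> "Hello world"

# Chat-style packing: each turn is framed by <|user|> / <|assistant|> command tokens.
inputs = tok.build_chat_input("What was Robert Geddes' profession?", history=[], role="user")
print(inputs["input_ids"].shape)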
tokenizer.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,12 @@
1
+ {
2
+ "name_or_path": "THUDM/chatglm4-130b",
3
+ "remove_space": false,
4
+ "do_lower_case": false,
5
+ "tokenizer_class": "TikTokenizer",
6
+ "auto_map": {
7
+ "AutoTokenizer": [
8
+ null,
9
+ "tiktoken_tokenizer.TikTokenizer"
10
+ ]
11
+ }
12
+ }
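The `auto_map` entry above is what lets `AutoTokenizer` resolve the custom class from tiktoken_tokenizer.py when `trust_remote_code=True` is passed, e.g. (sketch):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("THUDM/LongCite-llama3.1-8b", trust_remote_code=True)
print(type(tok).__name__)  # expected: TikTokenizer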
vllm_inference.py ADDED
@@ -0,0 +1,208 @@
1
+ import json
2
+ from vllm import LLM, SamplingParams
3
+ from nltk.tokenize import PunktSentenceTokenizer
4
+ import re
5
+ import torch
6
+
7
+ class LongCiteModel(LLM):
8
+
9
+ @torch.inference_mode()
10
+ def chat(self, tokenizer, query: str, history=None, role="user",
11
+ max_new_tokens=None, top_p=0.7, temperature=0.95):
12
+ if history is None:
13
+ history = []
14
+ inputs = tokenizer.build_chat_input(query, history=history, role=role)
15
+ eos_token_id = [tokenizer.eos_token_id, tokenizer.get_command("<|user|>"), tokenizer.get_command("<|observation|>")]
16
+ generation_params = SamplingParams(
17
+ temperature=temperature,
18
+ top_p=top_p,
19
+ max_tokens=max_new_tokens,
20
+ stop_token_ids=eos_token_id,
21
+ )
22
+ input_ids = inputs.input_ids[0].tolist()
23
+ outputs = self.generate(sampling_params=generation_params, prompt_token_ids=[input_ids])
24
+ response = tokenizer.decode(outputs[0].outputs[0].token_ids[:-1])
25
+ history.append({"role": role, "content": query})
26
+ return response, history
27
+
28
+ def query_longcite(self, context, query, tokenizer, max_input_length=128000, max_new_tokens=1024, temperature=0.95):
29
+
30
+ def text_split_by_punctuation(original_text, return_dict=False):
31
+ # text = re.sub(r'([a-z])\.([A-Z])', r'\1. \2', original_text) # separate period without space
32
+ text = original_text
33
+ custom_sent_tokenizer = PunktSentenceTokenizer(text)
34
+ punctuations = r"([。;!?])" # For Chinese support
35
+
36
+ separated = custom_sent_tokenizer.tokenize(text)
37
+ separated = sum([re.split(punctuations, s) for s in separated], [])
38
+ # Put the punctuations back to the sentence
39
+ for i in range(1, len(separated)):
40
+ if re.match(punctuations, separated[i]):
41
+ separated[i-1] += separated[i]
42
+ separated[i] = ''
43
+
44
+ separated = [s for s in separated if s != ""]
45
+ if len(separated) == 1:
46
+ separated = original_text.split('\n\n')
47
+ separated = [s.strip() for s in separated if s.strip() != ""]
48
+ if not return_dict:
49
+ return separated
50
+ else:
51
+ pos = 0
52
+ res = []
53
+ for i, sent in enumerate(separated):
54
+ st = original_text.find(sent, pos)
55
+ assert st != -1, sent
56
+ ed = st + len(sent)
57
+ res.append(
58
+ {
59
+ 'c_idx': i,
60
+ 'content': sent,
61
+ 'start_idx': st,
62
+ 'end_idx': ed,
63
+ }
64
+ )
65
+ pos = ed
66
+ return res
67
+
68
+ def get_prompt(context, question):
69
+ sents = text_split_by_punctuation(context, return_dict=True)
70
+ splited_context = ""
71
+ for i, s in enumerate(sents):
72
+ st, ed = s['start_idx'], s['end_idx']
73
+ assert s['content'] == context[st:ed], s
74
+ ed = sents[i+1]['start_idx'] if i < len(sents)-1 else len(context)
75
+ sents[i] = {
76
+ 'content': context[st:ed],
77
+ 'start': st,
78
+ 'end': ed,
79
+ 'c_idx': s['c_idx'],
80
+ }
81
+ splited_context += f"<C{i}>"+context[st:ed]
82
+ prompt = '''Please answer the user's question based on the following document. When a sentence S in your response uses information from some chunks in the document (i.e., <C{s1}>-<C_{e1}>, <C{s2}>-<C{e2}>, ...), please append these chunk numbers to S in the format "<statement>{S}<cite>[{s1}-{e1}][{s2}-{e2}]...</cite></statement>". You must answer in the same language as the user's question.\n\n[Document Start]\n%s\n[Document End]\n\n%s''' % (splited_context, question)
83
+ return prompt, sents, splited_context
84
+
85
+ def get_citations(statement, sents):
86
+ c_texts = re.findall(r'<cite>(.*?)</cite>', statement, re.DOTALL)
87
+ spans = sum([re.findall(r"\[([0-9]+\-[0-9]+)\]", c_text, re.DOTALL) for c_text in c_texts], [])
88
+ statement = re.sub(r'<cite>(.*?)</cite>', '', statement, flags=re.DOTALL)
89
+ merged_citations = []
90
+ for i, s in enumerate(spans):
91
+ try:
92
+ st, ed = [int(x) for x in s.split('-')]
93
+ if st > len(sents) - 1 or ed < st:
94
+ continue
95
+ st, ed = max(0, st), min(ed, len(sents)-1)
96
+ assert st <= ed, str(c_texts) + '\t' + str(len(sents))
97
+ if len(merged_citations) > 0 and st == merged_citations[-1]['end_sentence_idx'] + 1:
98
+ merged_citations[-1].update({
99
+ "end_sentence_idx": ed,
100
+ 'end_char_idx': sents[ed]['end'],
101
+ 'cite': ''.join([x['content'] for x in sents[merged_citations[-1]['start_sentence_idx']:ed+1]]),
102
+ })
103
+ else:
104
+ merged_citations.append({
105
+ "start_sentence_idx": st,
106
+ "end_sentence_idx": ed,
107
+ "start_char_idx": sents[st]['start'],
108
+ 'end_char_idx': sents[ed]['end'],
109
+ 'cite': ''.join([x['content'] for x in sents[st:ed+1]]),
110
+ })
111
+ except:
112
+ print(c_texts, len(sents), statement)
113
+ raise
114
+ return statement, merged_citations[:3]
115
+
116
+ def postprocess(answer, sents, splited_context):
117
+ res = []
118
+ pos = 0
119
+ new_answer = ""
120
+ while True:
121
+ st = answer.find("<statement>", pos)
122
+ if st == -1:
123
+ st = len(answer)
124
+ ed = answer.find("</statement>", st)
125
+ statement = answer[pos:st]
126
+ if len(statement.strip()) > 5:
127
+ res.append({
128
+ "statement": statement,
129
+ "citation": []
130
+ })
131
+ new_answer += f"<statement>{statement}<cite></cite></statement>"
132
+ else:
133
+ res.append({
134
+ "statement": statement,
135
+ "citation": None,
136
+ })
137
+ new_answer += statement
138
+
139
+ if ed == -1:
140
+ break
141
+
142
+ statement = answer[st+len("<statement>"):ed]
143
+ if len(statement.strip()) > 0:
144
+ statement, citations = get_citations(statement, sents)
145
+ res.append({
146
+ "statement": statement,
147
+ "citation": citations
148
+ })
149
+ c_str = ''.join(['[{}-{}]'.format(c['start_sentence_idx'], c['end_sentence_idx']) for c in citations])
150
+ new_answer += f"<statement>{statement}<cite>{c_str}</cite></statement>"
151
+ else:
152
+ res.append({
153
+ "statement": statement,
154
+ "citation": None,
155
+ })
156
+ new_answer += statement
157
+ pos = ed + len("</statement>")
158
+ return {
159
+ "answer": new_answer.strip(),
160
+ "statements_with_citations": [x for x in res if x['citation'] is not None],
161
+ "splited_context": splited_context.strip(),
162
+ "all_statements": res,
163
+ }
164
+
165
+ def truncate_from_middle(prompt, max_input_length=None, tokenizer=None):
166
+ if max_input_length is None:
167
+ return prompt
168
+ else:
169
+ assert tokenizer is not None
170
+ tokenized_prompt = tokenizer.encode(prompt, add_special_tokens=False)
171
+ if len(tokenized_prompt) > max_input_length:
172
+ half = int(max_input_length/2)
173
+ prompt = tokenizer.decode(tokenized_prompt[:half], skip_special_tokens=True)+tokenizer.decode(tokenized_prompt[-half:], skip_special_tokens=True)
174
+ return prompt
175
+
176
+ prompt, sents, splited_context = get_prompt(context, query)
177
+ prompt = truncate_from_middle(prompt, max_input_length, tokenizer)
178
+ output, _ = self.chat(tokenizer, prompt, history=[], max_new_tokens=max_new_tokens, temperature=temperature)
179
+ result = postprocess(output, sents, splited_context)
180
+ return result
181
+
182
+
183
+ if __name__ == "__main__":
184
+ model_path = "THUDM/LongCite-llama3.1-8b"
185
+ model = LongCiteModel(
186
+ model=model_path,
187
+ dtype=torch.bfloat16,
188
+ trust_remote_code=True,
189
+ tensor_parallel_size=1,
190
+ max_model_len=131072,
191
+ gpu_memory_utilization=1,
192
+ )
193
+ tokenizer = model.get_tokenizer()
194
+
195
+ context = '''
196
+ W. Russell Todd, 94, United States Army general (b. 1928). February 13. Tim Aymar, 59, heavy metal singer (Pharaoh) (b. 1963). Marshall \"Eddie\" Conway, 76, Black Panther Party leader (b. 1946). Roger Bonk, 78, football player (North Dakota Fighting Sioux, Winnipeg Blue Bombers) (b. 1944). Conrad Dobler, 72, football player (St. Louis Cardinals, New Orleans Saints, Buffalo Bills) (b. 1950). Brian DuBois, 55, baseball player (Detroit Tigers) (b. 1967). Robert Geddes, 99, architect, dean of the Princeton University School of Architecture (1965–1982) (b. 1923). Tom Luddy, 79, film producer (Barfly, The Secret Garden), co-founder of the Telluride Film Festival (b. 1943). David Singmaster, 84, mathematician (b. 1938).
197
+ '''
198
+ query = "What was Robert Geddes' profession?"
199
+ result = model.query_longcite(context, query, tokenizer=tokenizer, max_input_length=128000, max_new_tokens=1024)
200
+
201
+ print("Answer:")
202
+ print(result['answer'])
203
+ print('\n')
204
+ print("Statement with citations:" )
205
+ print(json.dumps(result['statements_with_citations'], indent=2, ensure_ascii=False))
206
+ print('\n')
207
+ print("Context (divided into sentences):")
208
+ print(result['splited_context'])
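A toy illustration (hand-written, not model output) of the `<statement>...<cite>` markup that `get_citations` and `postprocess` above parse; the sentence index 5 is arbitrary and used only for illustration:

import re

answer = "<statement>Robert Geddes was an architect.<cite>[5-5]</cite></statement>"
cite_blocks = re.findall(r"<cite>(.*?)</cite>", answer, re.DOTALL)  # ['[5-5]']
spans = sum([re.findall(r"\[([0-9]+\-[0-9]+)\]", c, re.DOTALL) for c in cite_blocks], [])
print(spans)  # ['5-5'] -> cited sentence range <C5>..<C5> in the numbered context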