haoyang-amd committed
Commit f66e03b · verified · 1 parent: ae670da

config.json ADDED
@@ -0,0 +1,66 @@
+ {
+   "_name_or_path": "/group/amdneuralopt/huggingface/pretrained_models/deepseek-ai/deepseek-moe-16b-chat",
+   "architectures": [
+     "DeepseekForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_deepseek.DeepseekConfig",
+     "AutoModel": "modeling_deepseek.DeepseekModel",
+     "AutoModelForCausalLM": "modeling_deepseek.DeepseekForCausalLM"
+   },
+   "aux_loss_alpha": 0.001,
+   "bos_token_id": 100000,
+   "eos_token_id": 100001,
+   "first_k_dense_replace": 1,
+   "hidden_act": "silu",
+   "hidden_size": 2048,
+   "initializer_range": 0.02,
+   "intermediate_size": 10944,
+   "max_position_embeddings": 4096,
+   "model_type": "deepseek",
+   "moe_intermediate_size": 1408,
+   "moe_layer_freq": 1,
+   "n_routed_experts": 64,
+   "n_shared_experts": 2,
+   "norm_topk_prob": false,
+   "num_attention_heads": 16,
+   "num_experts_per_tok": 6,
+   "num_hidden_layers": 28,
+   "num_key_value_heads": 16,
+   "pretraining_tp": 1,
+   "quantization_config": {
+     "activation_scheme": "static",
+     "export": {
+       "kv_cache_group": [
+         "*k_proj",
+         "*v_proj"
+       ],
+       "min_kv_scale": 1.0,
+       "pack_method": "reorder",
+       "weight_format": "real_quantized",
+       "weight_merge_groups": null
+     },
+     "ignored_layers": [
+       "lm_head"
+     ],
+     "int4_experts": {
+       "bits": 4,
+       "group": "column",
+       "sym": true
+     },
+     "kv_cache_scheme": "static",
+     "quant_method": "fp8"
+   },
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 10000,
+   "scoring_func": "softmax",
+   "seq_aux": true,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.47.1",
+   "use_cache": true,
+   "vocab_size": 102400
+ }
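
The config above wires in the custom Deepseek classes via `auto_map` and records the quantization scheme: FP8 weights with static activation scales, static KV-cache scales gathered from the `*k_proj`/`*v_proj` groups, symmetric column-grouped INT4 routed experts, and `lm_head` left unquantized. A minimal sketch for inspecting these fields, assuming the committed files are checked out locally under `./deepseek-moe-16b-chat-fp8` (a placeholder path, not part of this commit):

```python
# Sketch: load the committed config and inspect its quantization settings.
# "./deepseek-moe-16b-chat-fp8" is an assumed local checkout of this repo.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "./deepseek-moe-16b-chat-fp8",
    trust_remote_code=True,  # auto_map points at the bundled configuration_deepseek.py
)

print(config.model_type)                                    # "deepseek"
print(config.n_routed_experts, config.num_experts_per_tok)  # 64 routed experts, 6 per token
# quantization_config is carried through as a plain dict attribute on the config here
print(config.quantization_config["quant_method"])           # "fp8"
print(config.quantization_config["kv_cache_scheme"])        # "static"
```
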
configuration_deepseek.py ADDED
@@ -0,0 +1,200 @@
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+ logger = logging.get_logger(__name__)
+
+ DEEPSEEK_PRETRAINED_CONFIG_ARCHIVE_MAP = {}
+ class DeepseekConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`DeepseekModel`]. It is used to instantiate a DeepSeek
+     model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
+     defaults will yield a configuration similar to that of DeepSeek-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 102400):
+             Vocabulary size of the Deepseek model. Defines the number of different tokens that can be represented by the
+             `inputs_ids` passed when calling [`DeepseekModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         moe_intermediate_size (`int`, *optional*, defaults to 1407):
+             Dimension of the MoE representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         n_shared_experts (`int`, *optional*, defaults to `None`):
+             Number of shared experts; `None` means a dense model.
+         n_routed_experts (`int`, *optional*, defaults to `None`):
+             Number of routed experts; `None` means a dense model.
+         num_experts_per_tok (`int`, *optional*, defaults to `None`):
+             Number of selected experts; `None` means a dense model.
+         moe_layer_freq (`int`, *optional*, defaults to 1):
+             The frequency of the MoE layer: one expert layer for every `moe_layer_freq - 1` dense layers.
+         first_k_dense_replace (`int`, *optional*, defaults to 0):
+             Number of dense layers in the shallow layers (embed -> dense -> dense -> ... -> dense -> moe -> moe ... -> lm_head).
+             In other words, the first k layers stay dense.
+         norm_topk_prob (`bool`, *optional*, defaults to `False`):
+             Whether to normalize the weights of the routed experts.
+         scoring_func (`str`, *optional*, defaults to `'softmax'`):
+             Method of computing expert weights.
+         aux_loss_alpha (`float`, *optional*, defaults to 0.001):
+             Auxiliary loss weight coefficient.
+         seq_aux (`bool`, *optional*, defaults to `True`):
+             Whether to compute the auxiliary loss for each individual sample.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is
+             necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie weight embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format is
+             `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import DeepseekModel, DeepseekConfig
+
+     >>> # Initializing a Deepseek deepseek-7b style configuration
+     >>> configuration = DeepseekConfig()
+
+     >>> # Initializing a model from the deepseek-7b style configuration
+     >>> model = DeepseekModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "deepseek"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=102400,
+         hidden_size=4096,
+         intermediate_size=11008,
+         moe_intermediate_size=1407,
+         num_hidden_layers=30,
+         num_attention_heads=32,
+         num_key_value_heads=32,
+         n_shared_experts=None,
+         n_routed_experts=None,
+         num_experts_per_tok=None,
+         moe_layer_freq=1,
+         first_k_dense_replace=0,
+         norm_topk_prob=False,
+         scoring_func='softmax',
+         aux_loss_alpha=0.001,
+         seq_aux=True,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         pad_token_id=None,
+         bos_token_id=100000,
+         eos_token_id=100001,
+         pretraining_tp=1,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.moe_intermediate_size = moe_intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.n_shared_experts = n_shared_experts
+         self.n_routed_experts = n_routed_experts
+         self.num_experts_per_tok = num_experts_per_tok
+         self.moe_layer_freq = moe_layer_freq
+         self.first_k_dense_replace = first_k_dense_replace
+         self.norm_topk_prob = norm_topk_prob
+         self.scoring_func = scoring_func
+         self.aux_loss_alpha = aux_loss_alpha
+         self.seq_aux = seq_aux
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self._rope_scaling_validation()
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+             raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
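
Since the class above is plain Python, it can be exercised directly. The sketch below instantiates it with the MoE values recorded in config.json and then shows `_rope_scaling_validation` rejecting a malformed `rope_scaling` dict; it only assumes that configuration_deepseek.py from this commit is importable.

```python
# Sketch: exercise DeepseekConfig directly (values copied from config.json above).
# Assumes configuration_deepseek.py from this commit is on the Python path.
from configuration_deepseek import DeepseekConfig

config = DeepseekConfig(
    hidden_size=2048,
    intermediate_size=10944,
    moe_intermediate_size=1408,
    num_hidden_layers=28,
    num_attention_heads=16,
    num_key_value_heads=16,
    n_shared_experts=2,
    n_routed_experts=64,
    num_experts_per_tok=6,
    first_k_dense_replace=1,
    max_position_embeddings=4096,
)
print(config.model_type, config.num_experts_per_tok)  # deepseek 6

# _rope_scaling_validation() runs inside __init__ and rejects malformed dicts:
try:
    DeepseekConfig(rope_scaling={"type": "linear"})  # "factor" is missing
except ValueError as err:
    print(err)
```
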
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 100000,
+   "eos_token_id": 100001,
+   "transformers_version": "4.47.1"
+ }
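
These values become the model's default decoding settings: generation stops when token id 100001 is produced. A small sketch, assuming the same placeholder local checkout as above:

```python
# Sketch: generation_config.json supplies the defaults used by model.generate().
# Assumes the committed files are available at ./deepseek-moe-16b-chat-fp8.
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("./deepseek-moe-16b-chat-fp8")
print(gen_config.bos_token_id)  # 100000
print(gen_config.eos_token_id)  # 100001 -- decoding stops once this id is emitted
```
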
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b59aa60c69b3c52aaa6fefdbd58e46991ffe3371379bf0fe2b10eae82ac359a9
+ size 5000033692
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:565f05fc31b880c4e396013b7fcd4af8276e7bbcf5129489622c1379e94decf2
+ size 4110847836
model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
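
The two .safetensors entries above are Git LFS pointer files (roughly 5.0 GB and 4.1 GB of actual weights live in LFS storage), and model.safetensors.index.json maps each parameter name to the shard that stores it. A minimal sketch of resolving one tensor through that index, assuming the shards have been fetched (for example with `git lfs pull`) into the same placeholder checkout:

```python
# Sketch: resolve a tensor through model.safetensors.index.json and open its shard.
# Assumes the LFS shards are present locally (e.g. after `git lfs pull`).
import json
from safetensors import safe_open

repo_dir = "./deepseek-moe-16b-chat-fp8"       # placeholder local checkout
with open(f"{repo_dir}/model.safetensors.index.json") as f:
    index = json.load(f)

weight_map = index["weight_map"]               # {parameter name -> shard file}
name, shard = next(iter(weight_map.items()))   # pick any mapped parameter
with safe_open(f"{repo_dir}/{shard}", framework="pt") as st:
    tensor = st.get_tensor(name)
    print(name, tensor.shape, tensor.dtype)
```
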
 
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<|begin▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,139 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "100000": {
+       "content": "<|begin▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100001": {
+       "content": "<|end▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100002": {
+       "content": "ø",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100003": {
+       "content": "ö",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100004": {
+       "content": "ú",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100005": {
+       "content": "ÿ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100006": {
+       "content": "õ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100007": {
+       "content": "÷",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100008": {
+       "content": "û",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100009": {
+       "content": "ý",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100010": {
+       "content": "À",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100011": {
+       "content": "ù",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100012": {
+       "content": "Á",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100013": {
+       "content": "þ",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "100014": {
+       "content": "ü",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<|begin▁of▁sentence|>",
+   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{{ bos_token }}{% for message in messages %}{% if message['role'] == 'user' %}{{ 'User: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ 'Assistant: ' + message['content'] + eos_token }}{% elif message['role'] == 'system' %}{{ message['content'] + '\n\n' }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|end▁of▁sentence|>",
+   "extra_special_tokens": {},
+   "legacy": true,
+   "model_max_length": 16384,
+   "pad_token": "<|end▁of▁sentence|>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": null,
+   "use_default_system_prompt": false
+ }
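
The chat_template above formats conversations as `User: ...` / `Assistant: ...` turns prefixed with the BOS token. A minimal sketch of applying it, again assuming the tokenizer files from this commit are checked out locally at the placeholder path `./deepseek-moe-16b-chat-fp8`:

```python
# Sketch: render a prompt with the chat template defined in tokenizer_config.json.
# Assumes the tokenizer files above are available at ./deepseek-moe-16b-chat-fp8.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("./deepseek-moe-16b-chat-fp8")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is a mixture-of-experts model?"},
]
prompt = tokenizer.apply_chat_template(
    messages,
    tokenize=False,              # return the formatted string instead of token ids
    add_generation_prompt=True,  # appends "Assistant:" so the model continues the reply
)
print(prompt)
```
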