JessyTsu1 commited on
Commit
5eb30a1
1 Parent(s): a7de9b1

Upload configuration_baichuan.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. configuration_baichuan.py +69 -0
configuration_baichuan.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 Baichuan Inc. All Rights Reserved.
2
+
3
+ # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
6
+ # and OPT implementations in this library. It has been modified from its
7
+ # original forms to accommodate minor architectural differences compared
8
+ # to GPT-NeoX and OPT used by the Meta AI team that trained the model.
9
+ #
10
+ # Licensed under the Apache License, Version 2.0 (the "License");
11
+ # you may not use this file except in compliance with the License.
12
+ # You may obtain a copy of the License at
13
+ #
14
+ # http://www.apache.org/licenses/LICENSE-2.0
15
+ #
16
+ # Unless required by applicable law or agreed to in writing, software
17
+ # distributed under the License is distributed on an "AS IS" BASIS,
18
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
19
+ # See the License for the specific language governing permissions and
20
+ # limitations under the License.
21
+
22
+ from transformers.configuration_utils import PretrainedConfig
23
+ from transformers.utils import logging
24
+
25
+
26
# Module-level logger, named after this module per the transformers convention.
# (logging here is transformers.utils.logging, not the stdlib module.)
logger = logging.get_logger(__name__)
27
+
28
+
29
class BaichuanConfig(PretrainedConfig):
    """Configuration class for Baichuan models.

    Holds the hyper-parameters that define a Baichuan decoder model
    (hidden sizes, layer/head counts, activation, RMSNorm epsilon, token
    ids, …).  All arguments are optional; the defaults below describe the
    stock Baichuan architecture.

    Args:
        vocab_size: Size of the token vocabulary.
        hidden_size: Dimensionality of the hidden states.
        intermediate_size: Dimensionality of the MLP intermediate layer.
        num_hidden_layers: Number of decoder layers.
        num_attention_heads: Number of attention heads per layer.
        hidden_act: Activation function used in the MLP.
        max_position_embeddings: Maximum sequence length the model supports.
        initializer_range: Stddev of the weight-init normal distribution.
        rms_norm_eps: Epsilon used by the RMSNorm layers.
        use_cache: Whether the model should return past key/values.
        pad_token_id / bos_token_id / eos_token_id: Special token ids.
        tie_word_embeddings: Whether input and output embeddings are tied.
        z_loss_weight: Weight of the auxiliary z-loss term (0 disables it).
        **kwargs: Forwarded to ``PretrainedConfig``.
    """

    model_type = "baichuan"
    # Keys dropped from the output dict at inference time.
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=125696,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=4096,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        z_loss_weight=0,
        **kwargs,
    ):
        # Model geometry.
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.vocab_size = vocab_size

        # Non-linearity, normalization, and initialization.
        self.hidden_act = hidden_act
        self.rms_norm_eps = rms_norm_eps
        self.initializer_range = initializer_range

        # Runtime / training behaviour.
        self.use_cache = use_cache
        self.z_loss_weight = z_loss_weight

        # Attributes are set before super().__init__ on purpose:
        # PretrainedConfig applies remaining kwargs via setattr, so values
        # passed through kwargs must be able to take precedence.
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )