ehartford committed
Commit bc37bad
1 Parent(s): 73a122a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. config.json +48 -0
  2. configuration_hunyuan.py +206 -0
  3. generation_config.json +13 -0
  4. hy.tiktoken +0 -0
  5. model-00001-of-00159.safetensors +3 -0
  6. model-00002-of-00159.safetensors +3 -0
  7. model-00003-of-00159.safetensors +3 -0
  8. model-00004-of-00159.safetensors +3 -0
  9. model-00006-of-00159.safetensors +3 -0
  10. model-00008-of-00159.safetensors +3 -0
  11. model-00010-of-00159.safetensors +3 -0
  12. model-00011-of-00159.safetensors +3 -0
  13. model-00013-of-00159.safetensors +3 -0
  14. model-00014-of-00159.safetensors +3 -0
  15. model-00015-of-00159.safetensors +3 -0
  16. model-00016-of-00159.safetensors +3 -0
  17. model-00017-of-00159.safetensors +3 -0
  18. model-00018-of-00159.safetensors +3 -0
  19. model-00020-of-00159.safetensors +3 -0
  20. model-00021-of-00159.safetensors +3 -0
  21. model-00022-of-00159.safetensors +3 -0
  22. model-00024-of-00159.safetensors +3 -0
  23. model-00025-of-00159.safetensors +3 -0
  24. model-00026-of-00159.safetensors +3 -0
  25. model-00027-of-00159.safetensors +3 -0
  26. model-00028-of-00159.safetensors +3 -0
  27. model-00029-of-00159.safetensors +3 -0
  28. model-00030-of-00159.safetensors +3 -0
  29. model-00031-of-00159.safetensors +3 -0
  30. model-00032-of-00159.safetensors +3 -0
  31. model-00033-of-00159.safetensors +3 -0
  32. model-00034-of-00159.safetensors +3 -0
  33. model-00035-of-00159.safetensors +3 -0
  34. model-00036-of-00159.safetensors +3 -0
  35. model-00037-of-00159.safetensors +3 -0
  36. model-00038-of-00159.safetensors +3 -0
  37. model-00039-of-00159.safetensors +3 -0
  38. model-00040-of-00159.safetensors +3 -0
  39. model-00041-of-00159.safetensors +3 -0
  40. model-00042-of-00159.safetensors +3 -0
  41. model-00043-of-00159.safetensors +3 -0
  42. model-00045-of-00159.safetensors +3 -0
  43. model-00046-of-00159.safetensors +3 -0
  44. model-00047-of-00159.safetensors +3 -0
  45. model-00048-of-00159.safetensors +3 -0
  46. model-00049-of-00159.safetensors +3 -0
  47. model-00051-of-00159.safetensors +3 -0
  48. model-00052-of-00159.safetensors +3 -0
  49. model-00053-of-00159.safetensors +3 -0
  50. model-00057-of-00159.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "_name_or_path": "/workspace/models/tencent/Hunyuan-A52B-Instruct",
+   "architectures": [
+     "HunYuanForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_hunyuan.HunYuanConfig",
+     "AutoModel": "modeling_hunyuan.HunyuanModel",
+     "AutoModelForCausalLM": "modeling_hunyuan.HunYuanForCausalLM"
+   },
+   "bos_token_id": 1,
+   "capacity_factor": 1.0,
+   "cla_share_factor": 2,
+   "eos_token_id": 2,
+   "hidden_act": "silu",
+   "hidden_size": 6400,
+   "initializer_range": 0.02,
+   "intermediate_size": 18304,
+   "max_position_embeddings": 131072,
+   "model_type": "hunyuan",
+   "moe_drop_tokens": false,
+   "moe_random_routing_dropped_token": false,
+   "moe_topk": 1,
+   "num_attention_heads": 80,
+   "num_experts": 16,
+   "num_hidden_layers": 64,
+   "num_key_value_heads": 8,
+   "num_shared_expert": 1,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": {
+     "alpha": 1000.0,
+     "factor": 1.0,
+     "type": "dynamic"
+   },
+   "rope_theta": 10000.0,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.45.0.dev0",
+   "use_cache": true,
+   "use_cla": true,
+   "use_mixed_mlp_moe": true,
+   "use_qk_norm": true,
+   "vocab_size": 129024
+ }
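
Since config.json wires the architecture to custom code via auto_map, loading this checkpoint with Transformers requires trust_remote_code=True. A minimal sketch, assuming the files above sit in a local directory (the path below is the checkpoint's own _name_or_path and is hypothetical on any other machine; note that modeling_hunyuan.py is referenced by auto_map but falls outside this 50-file view):

from transformers import AutoConfig

# trust_remote_code=True lets AutoConfig resolve the auto_map entry to the
# custom configuration_hunyuan.HunYuanConfig shipped in this repo.
config = AutoConfig.from_pretrained(
    "/workspace/models/tencent/Hunyuan-A52B-Instruct",  # hypothetical local path
    trust_remote_code=True,
)
print(config.model_type)   # "hunyuan"
print(config.num_experts)  # 16 routed experts, moe_topk=1, plus 1 shared expert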
configuration_hunyuan.py ADDED
@@ -0,0 +1,206 @@
+ # Copyright (C) 2024 THL A29 Limited, a Tencent company. All rights reserved.
+ #
+ # Licensed under the TENCENT HUNYUAN COMMUNITY LICENSE AGREEMENT (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # https://github.com/Tencent/Tencent-Hunyuan-Large/blob/main/License.docx
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """HunYuan model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class HunYuanConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`HunYuanModel`]. It is used to instantiate an
+     HunYuan model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a configuration similar to that of HunYuan-7B.
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 32000):
+             Vocabulary size of the HunYuan model. Defines the number of different tokens that can be represented by
+             the `inputs_ids` passed when calling [`HunYuanModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 11008):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer decoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer decoder.
+         num_key_value_heads (`int`, *optional*):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be
+             constructed by mean-pooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, it will default to
+             `num_attention_heads`.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 2048):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         pad_token_id (`int`, *optional*):
+             Padding token id.
+         bos_token_id (`int`, *optional*, defaults to 1):
+             Beginning of stream token id.
+         eos_token_id (`int`, *optional*, defaults to 2):
+             End of stream token id.
+         pretraining_tp (`int`, *optional*, defaults to 1):
+             Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this
+             document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value
+             is necessary to ensure exact reproducibility of the pretraining results. Please refer to [this
+             issue](https://github.com/pytorch/pytorch/issues/76232).
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether to tie the input and output word embeddings.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling
+             strategies: linear and dynamic. Their scaling factor must be a float greater than 1. The expected format
+             is `{"type": strategy name, "factor": scaling factor}`. When using this flag, don't update
+             `max_position_embeddings` to the expected new maximum. See the following thread for more information on
+             how these scaling strategies behave:
+             https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is
+             an experimental feature, subject to breaking API changes in future versions.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+         use_qk_norm (`bool`, *optional*, defaults to `False`):
+             Whether to apply normalization to the query and key states in attention.
+         use_cla (`bool`, *optional*, defaults to `False`):
+             Whether to use cross-layer attention (CLA), sharing key/value states across adjacent layers.
+         cla_share_factor (`int`, *optional*, defaults to 1):
+             The sharing factor for CLA: the number of consecutive layers that reuse one set of key/value states.
+     """
+
+     model_type = "hunyuan"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     def __init__(
+         self,
+         vocab_size=290943,
+         hidden_size=4096,
+         intermediate_size=11008,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=None,
+         hidden_act="silu",
+         max_position_embeddings=2048,
+         initializer_range=0.02,
+         rms_norm_eps=1e-5,
+         use_cache=True,
+         pad_token_id=0,
+         bos_token_id=1,
+         eos_token_id=2,
+         pretraining_tp=1,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         attention_dropout=0.0,
+         use_qk_norm=False,
+         use_cla=False,
+         cla_share_factor=1,
+         num_experts=1,
+         use_mixed_mlp_moe=False,
+         num_shared_expert=1,
+         moe_topk=1,
+         capacity_factor=1.0,
+         moe_drop_tokens=False,
+         moe_random_routing_dropped_token=False,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.num_experts = num_experts
+         self.use_mixed_mlp_moe = use_mixed_mlp_moe
+         self.num_shared_expert = num_shared_expert
+         self.moe_topk = moe_topk
+         self.capacity_factor = capacity_factor
+         self.moe_drop_tokens = moe_drop_tokens
+         self.moe_random_routing_dropped_token = moe_random_routing_dropped_token
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.pretraining_tp = pretraining_tp
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         # self._rope_scaling_validation()  # TODO: Need validation?
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         self.use_qk_norm = use_qk_norm
+         self.use_cla = use_cla
+         self.cla_share_factor = cla_share_factor
+
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+     def _rope_scaling_validation(self):
+         """
+         Validate the `rope_scaling` configuration.
+         """
+         if self.rope_scaling is None:
+             return
+
+         if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
+             raise ValueError(
+                 "`rope_scaling` must be a dictionary with two fields, `type` and `factor` or `type` and `alpha`, "
+                 f"got {self.rope_scaling}"
+             )
+         rope_scaling_type = self.rope_scaling.get("type", None)
+         rope_scaling_factor = self.rope_scaling.get("factor", None)
+         rope_scaling_alpha = self.rope_scaling.get("alpha", None)
+         if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
+             raise ValueError(
+                 f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
+             )
+         if rope_scaling_factor is None and rope_scaling_alpha is None:
+             raise ValueError("`rope_scaling` must have one of the `factor` or `alpha` fields, got neither")
+         if rope_scaling_factor is not None:
+             if not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
+                 raise ValueError(f"`rope_scaling`'s factor field must be a float > 1.0, got {rope_scaling_factor}")
+         if rope_scaling_alpha is not None:
+             if not isinstance(rope_scaling_alpha, float) or rope_scaling_alpha <= 1.0:
+                 raise ValueError(f"`rope_scaling`'s alpha field must be a float > 1.0, got {rope_scaling_alpha}")
+
+
+
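
One detail worth noting: _rope_scaling_validation is left commented out in __init__ ("TODO: Need validation?"), and this checkpoint quietly depends on that, since the three-field rope_scaling in config.json (type, factor, alpha) would fail the len(...) != 2 check. A minimal sketch instantiating the class with this checkpoint's values, with rope_scaling reduced to the two-field form the validator accepts:

from configuration_hunyuan import HunYuanConfig

# Values mirror config.json above; rope_scaling is reduced to the two-field
# {"type", "alpha"} form so that _rope_scaling_validation also passes.
config = HunYuanConfig(
    vocab_size=129024,
    hidden_size=6400,
    intermediate_size=18304,
    num_hidden_layers=64,
    num_attention_heads=80,
    num_key_value_heads=8,
    max_position_embeddings=131072,
    use_qk_norm=True,
    use_cla=True,
    cla_share_factor=2,
    num_experts=16,
    use_mixed_mlp_moe=True,
    num_shared_expert=1,
    moe_topk=1,
    tie_word_embeddings=True,
    rope_scaling={"type": "dynamic", "alpha": 1000.0},
)
config._rope_scaling_validation()  # passes: type "dynamic", alpha > 1.0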
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "do_sample": true,
+   "eos_token_id": [
+     127960,
+     127967
+   ],
+   "pad_token_id": 127961,
+   "repetition_penalty": 1.05,
+   "temperature": 0.7,
+   "top_k": 20,
+   "top_p": 0.6,
+   "transformers_version": "4.45.0.dev0"
+ }
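
These sampling defaults are picked up automatically by generate() once the model is loaded. A minimal sketch, assuming the checkpoint directory above (the explicit keyword arguments only restate what generation_config.json already sets):

from transformers import AutoModelForCausalLM, AutoTokenizer

path = "/workspace/models/tencent/Hunyuan-A52B-Instruct"  # hypothetical local path
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    path, trust_remote_code=True, torch_dtype="auto", device_map="auto"
)

inputs = tokenizer("Write a haiku about autumn.", return_tensors="pt").to(model.device)
outputs = model.generate(
    **inputs,
    do_sample=True,           # stochastic decoding
    temperature=0.7,
    top_k=20,
    top_p=0.6,
    repetition_penalty=1.05,
    max_new_tokens=64,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))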
hy.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
model-00001-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f87c7709e11ead74f54900dce42f7432c7b2cb88837b8c8481c46464a86d2c0e
+ size 4877929280
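
The entry above, and each shard entry that follows, is a Git LFS pointer rather than the weights themselves: three lines recording the spec version, the sha256 of the actual shard, and its size in bytes. At roughly 4.9 GB per shard across 159 shards, the full bfloat16 checkpoint comes to roughly 780 GB. A minimal sketch (hypothetical paths) of checking a downloaded shard against its pointer:

import hashlib

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    # Parse the "key value" lines of the pointer file.
    fields = dict(
        line.strip().split(" ", 1) for line in open(pointer_path) if " " in line
    )
    expected_oid = fields["oid"].removeprefix("sha256:")
    expected_size = int(fields["size"])
    # Stream the blob so multi-GB shards never have to fit in memory.
    digest, size = hashlib.sha256(), 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

# e.g. verify_lfs_pointer("model-00001-of-00159.safetensors.pointer",
#                         "model-00001-of-00159.safetensors")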
model-00002-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:313259353956fd8ce8d47043f0a7272583223ba08a11b98c7c887f83117a1359
+ size 4920117840
model-00003-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:006e896c5b39d3e119a63893f4dea5653554403a32da87644525965a38ae4e5f
+ size 4850102848
model-00004-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:670d998d0cdffc7efdd7f74f082943b5d04cabd34c5e6a028e75d807d824ee49
+ size 4920117840
model-00006-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aed3496bb2b6fe022e525b4a5ec6ef4afd0d1711bda79c09e6ad2c1a9155eb75
+ size 4866487064
model-00008-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d17aa885916c921dfa9c9b979f81edcd7c2a2a39a3cd3e0970c72bbf187cf3cf
+ size 4850102848
model-00010-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:336e1a5459195fd48d6319563dd3bdb1c3bcd7e57489d34121e271bed29cd85a
+ size 4920117848
model-00011-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:76c4569f30ba3cacb414caacd096aad71650b51200a11b98fd6c21dff481b87d
+ size 4866487064
model-00013-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:609d343a156f5cf63ab9c8c5054a2c09a902411a205fbd34d147755d350e6c92
+ size 4850102848
model-00014-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:daa7c7e584789d8e843aa71be1078e4fb53d5533893d566b38296d11e241a8b4
+ size 4920117840
model-00015-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c50dd1f6aad9db8ca41dd7ea0e576f67907543f98ced456cfe8a8a883ba6c288
+ size 4920117848
model-00016-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66706dc2b39a133646bc7fb9df491daa266c03c879a4745d08b2f2efa3a35d0e
+ size 4866487064
model-00017-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9dfadc14e9cd85b62c68d3f5a0a4e55caa4b904a03ee1c59a31def9aae6d96b9
+ size 4920117840
model-00018-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4d3863f16be8228c8f40faf50f7f4474b353c01d0c6e2a6dfabb9fab38e7eec4
+ size 4850102840
model-00020-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a801045093daf54aed4ba5bb3dac56735f0a6b2fd0c2d8aee8bd64f7732dd52
+ size 4920117856
model-00021-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90b4e678f66691b7768bcf8a79cf20b4d8a7cfc4f1c4d474a798ec09cb0e3813
+ size 4866487064
model-00022-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59e1d75213b6a41cdebe5d43e77ecaf48ea5d7d9c0ab122c5e3b6a899a7d053d
+ size 4920117840
model-00024-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:85d5a45494eb59cb655c6b5d78ee442cb092d409ed24cb8760129c9c4f11ca19
+ size 4920117840
model-00025-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1290ef419446ffe8c5b68b153140c5bc7a48d697e7f4dd7bbb843c59c460a8e7
+ size 4920117856
model-00026-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:760d87fe8283e5f327e66b300f84d2311be447e2ec17c23af9d9245b378ff77d
+ size 4866487096
model-00027-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa65f32cb40c0cf7be9ad6113cdd89578e98d98d1687c3143209934fe96d9a43
+ size 4920117864
model-00028-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0249369113099502128f22d3c781de23a934807b4aa723fb219fc156b3df1b6a
+ size 4850102872
model-00029-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efd85aa8125e3609410da814f36661b96717633a2f986c54a0d2898706e5883b
+ size 4920117856
model-00030-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ac161eb40edb2640bb962fd5873428c5f71d17254bbe94a30eb13894842305d
+ size 4920117872
model-00031-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d189b7cb342f17aeb19a6e1ed06244bc2997aab839458967bc2a3e31004b5b2
+ size 4866487088
model-00032-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5cf983ac6399c00088ad2347af1012f9b0e9cc8fb3a88f0d19aa14afe24ab8f
+ size 4920117864
model-00033-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c57800815d89f448b15d35b5b32dff40d01dd35ff68fe7a92d72f20b2a421dd8
+ size 4850102864
model-00034-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e0fa12c052d7f774bfcb71d9adfd2d398c4ad878f587fc999676d9b8434fe8d
+ size 4920117856
model-00035-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c96bebc1a5e303b57bd63668da73e91c81f1d1f7b161b64d746568dda7f54af3
+ size 4920143696
model-00036-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:74568df61c7804831ff096487bda8a3d39f5147987aefe5e6862b0c3bca463a8
+ size 4866461264
model-00037-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b2dbde2a8b6bf01ee95ce2171c609e058abeadb9e39ccdc8af69c99d422e131
+ size 4920117864
model-00038-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de6521b18d33870aa70d4d1ebd2f66b6ea5187687cb36c73fb7fe41950cf2652
+ size 4850102872
model-00039-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b6b24c967a24f0a6c3619a8ba8e0322d359238b3ba9cb38bad29e542981c1bf7
+ size 4920117856
model-00040-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52ca878d83b099325e23c2087a6f737b86fc8a9c6093247c31feb4e55ef69c99
+ size 4866077400
model-00041-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ec1c1cee2388cd770e1e9766930eebba23f947551e9ad380cb5bda3fa824945e
+ size 4920527560
model-00042-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b27e63ae7cabc6d3a3bef92218d62350ec0999561e8413e353763fb648e8b65
+ size 4920117864
model-00043-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16259d9b3f4952235db6214d7484a334667ab4382f25e10bcdc1deb0738a532b
+ size 4850102864
model-00045-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a69a7c13a3937bddf0e9ebbc78d581301a7b006262d7f9ca26934a8722da093
+ size 4866077408
model-00046-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:883025ef41917df16cedbe89a0438007458322804880864f03d19322432b873f
+ size 4920527560
model-00047-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bf23ad64e54e9af98f277f15d8390557c92cdd29a2339b4dc18ee0f69d5757cb
+ size 4920117872
model-00048-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:faab12329eb7c2f64918013a072c0f9535e864e72752096014f88a5dd260ff3e
+ size 4850102864
model-00049-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:635b541f30b8230912f540868a0a0495429feb9e10e4df5cbb670a2bc436723d
+ size 4920117856
model-00051-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:00402abef7e9d691ec8d230d356759e7d0da82e0ca5d1a5d64a66c0c758006ef
+ size 4920527560
model-00052-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d4699b80219cc2d2aaaf0af71ddae6135752239110bcb1119b56f007bcd787a1
+ size 4920117872
model-00053-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2b536296cb090fdc278e2f491dc6309764880e9b58c12383f2abcbd358caed0
+ size 4850102864
model-00057-of-00159.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c567719eaf83129aeadd9bb237a34050fe06b29c7878a87bc839b24da01c7263
+ size 4920117872