fsaudm committed on
Commit
271169b
1 Parent(s): 9ab77e6

Upload LlamaForCausalLM

Browse files
config.json CHANGED
@@ -1,5 +1,5 @@
1
  {
2
- "_name_or_path": "fsaudm/Meta-Llama-3.1-8B-Instruct-INT4",
3
  "architectures": [
4
  "LlamaForCausalLM"
5
  ],
@@ -25,11 +25,11 @@
25
  "quantization_config": {
26
  "_load_in_4bit": true,
27
  "_load_in_8bit": false,
28
- "bnb_4bit_compute_dtype": "float32",
29
  "bnb_4bit_quant_storage": "uint8",
30
- "bnb_4bit_quant_type": "fp4",
31
- "bnb_4bit_use_double_quant": false,
32
- "llm_int8_enable_fp32_cpu_offload": false,
33
  "llm_int8_has_fp16_weight": false,
34
  "llm_int8_skip_modules": null,
35
  "llm_int8_threshold": 6.0,
 
1
  {
2
+ "_name_or_path": "meta-llama/Meta-Llama-3.1-8B-Instruct",
3
  "architectures": [
4
  "LlamaForCausalLM"
5
  ],
 
25
  "quantization_config": {
26
  "_load_in_4bit": true,
27
  "_load_in_8bit": false,
28
+ "bnb_4bit_compute_dtype": "bfloat16",
29
  "bnb_4bit_quant_storage": "uint8",
30
+ "bnb_4bit_quant_type": "nf4",
31
+ "bnb_4bit_use_double_quant": true,
32
+ "llm_int8_enable_fp32_cpu_offload": true,
33
  "llm_int8_has_fp16_weight": false,
34
  "llm_int8_skip_modules": null,
35
  "llm_int8_threshold": 6.0,
model-00001-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:167f1d23187acb72e80bce74c54e61b7da3a25eae1c207bfc3a6a6425077cb9c
3
- size 4977222760
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3c4a79180091cae0d86a4dbb6a4ff24af3e5058c8be5930cba7e39113eebd854
3
+ size 4651954871
model-00002-of-00002.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9bb7a5c6402e8a0f3838d4d45f59e5f3fff4f0f6375ef5c71b2b4604cccded4c
3
  size 1050673280
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5321ca97dd0968a3e2b676b3a42728e9f5327d516bcf0addd4e9cd61c068a1ad
3
  size 1050673280
model.safetensors.index.json CHANGED
The diff for this file is too large to render. See raw diff