Commit b75be0c (verified), committed by sharpenb
1 Parent(s): 41aabcc

1fde032c6306fe8d91a4d15e035a1d22ec52cdb9848bf56d4cb431325f47c0e9

Files changed (3):
  1. config.json +2 -2
  2. model.safetensors +1 -1
  3. smash_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmpluu9d7tw2yfe3gap",
+  "_name_or_path": "/covalent/.cache/models/tmp57bf60qtms7lqfzq",
   "architectures": [
     "KPhi3ForCausalLM"
   ],
@@ -25,7 +25,7 @@
   "num_hidden_layers": 4,
   "num_key_value_heads": 20,
   "original_max_position_embeddings": 4096,
-  "pad_token_id": 0,
+  "pad_token_id": 32000,
   "quantization_config": {
     "_load_in_4bit": false,
     "_load_in_8bit": true,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0110234d8d49d564914daa53228c675cc0e0fa665aacd5a3ea87e82528c504df
+oid sha256:77208107c40186ffe47a1d95c6195cd33ea97995a7e3af9c0e2d54db0670bd5e
 size 201550688
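
The model.safetensors entry is a Git LFS pointer: the oid line is the SHA-256 of the actual weight file, whose size (201550688 bytes) is unchanged. A quick sketch for verifying a downloaded copy against the new pointer; the file path is assumed to be the local download location.

# Sketch: verify a downloaded model.safetensors against the new LFS pointer's oid.
# The path is a placeholder for wherever the file was fetched.
import hashlib

expected = "77208107c40186ffe47a1d95c6195cd33ea97995a7e3af9c0e2d54db0670bd5e"

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert h.hexdigest() == expected, "checksum mismatch: file does not match the LFS pointer"
print("oid check passed")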
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmpluu9d7tw",
+  "cache_dir": "/covalent/.cache/models/tmp57bf60qt",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}