sharpenb committed
Commit 084d189 · verified · 1 parent: d88a70b

Upload folder using huggingface_hub (#2)


- 22d3e49937519683cb3cc931967d5bcbc70439df3c711af9014f9197736187e3 (0c9885954e2f427e98a40cb5be38582bdd47346a)
- c810c1a7cf221ef5e0d9028d39c84c86d31efd1899c1ba5676411820ce4c4f14 (6677f879d3651b641a7f3dcae307849c627d5aa5)

Files changed (3):
  1. config.json +1 -1
  2. model.safetensors +1 -1
  3. smash_config.json +1 -1
config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/covalent/.cache/models/tmpqg3zzw56pjb3pppg",
+  "_name_or_path": "/covalent/.cache/models/tmpxsr71ohu6yp2y6e6",
   "architectures": [
     "Gemma2ForCausalLM"
   ],
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:12b1c1481e7c89d8505a06e8886265a6a2c62dd91db4922fa5daa81059e21a22
+oid sha256:309e7d1eb55cc1f983083d5115e8a73699094bfa4042582525c9ece237c2d225
 size 3207302046
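
Note: model.safetensors is tracked with Git LFS, so the file stored in the repository is only a pointer recording the blob's sha256 oid and byte size; this commit swaps the oid while the size stays at 3207302046 bytes. A minimal sketch of verifying a downloaded copy against the new oid (the local path "model.safetensors" is an assumption):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so multi-GB weights fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# oid from the new LFS pointer in this commit
expected = "309e7d1eb55cc1f983083d5115e8a73699094bfa4042582525c9ece237c2d225"
assert sha256_of("model.safetensors") == expected, "weights do not match the pointer oid"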
smash_config.json CHANGED
@@ -28,7 +28,7 @@
   "quant_llm-int8_weight_bits": 8,
   "max_batch_size": 1,
   "device": "cuda",
-  "cache_dir": "/covalent/.cache/models/tmpqg3zzw56",
+  "cache_dir": "/covalent/.cache/models/tmpxsr71ohu",
   "task": "",
   "save_load_fn": "bitsandbytes",
   "save_load_fn_args": {}