update to new format
- README.md +1 -1
- params.json +9 -0
- CodeLlama-7b-Python.npz → weights.npz +2 -2
README.md
CHANGED
@@ -29,7 +29,7 @@ export HF_HUB_ENABLE_HF_TRANSFER=1
 huggingface-cli download --local-dir CodeLlama-7b-Python-mlx mlx-llama/CodeLlama-7b-Python-mlx

 # Run example
-python mlx-examples/llama/llama.py CodeLlama-7b-Python-mlx/
+python mlx-examples/llama/llama.py CodeLlama-7b-Python-mlx/ CodeLlama-7b-Python-mlx/tokenizer.model "def fibonacci("
 ```

 Please, refer to the [original model card](https://github.com/facebookresearch/codellama/blob/main/MODEL_CARD.md) for details on CodeLlama.
params.json
ADDED
@@ -0,0 +1,9 @@
+{
+    "dim": 4096,
+    "n_layers": 32,
+    "n_heads": 32,
+    "multiple_of": 256,
+    "ffn_dim_multiplier": 1.0,
+    "norm_eps": 1e-5,
+    "rope_theta": 1000000
+}
CodeLlama-7b-Python.npz → weights.npz
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:a63f8288d2e0a6f5140169662eb3d27af8d9829ec3b4d8d186427b2ed26717e5
+size 13476919308
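
For reference, a minimal sketch of how the new-format files might be inspected with the standard library and plain NumPy. It assumes `weights.npz` is a standard NumPy `.npz` archive and that both files sit in the `CodeLlama-7b-Python-mlx/` directory created by the README's download step; the array names inside the archive are not shown in this commit, so they are only listed rather than assumed.

```python
# Sketch (not part of the commit): peek at params.json and weights.npz.
import json

import numpy as np

with open("CodeLlama-7b-Python-mlx/params.json") as f:
    params = json.load(f)

# Values from the params.json added in this commit.
print(params["dim"], params["n_layers"], params["n_heads"])  # 4096 32 32

# weights.npz is assumed to be a NumPy archive; list a few array names and shapes.
weights = np.load("CodeLlama-7b-Python-mlx/weights.npz")
for name in list(weights.files)[:5]:
    print(name, weights[name].shape)
```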