Update config and instructions
- .gitattributes +1 -0
- README.md +5 -7
- config.json +3 -3
- special_tokens_map.json +1 -23
- tokenizer.model → tokenizer.json +2 -2
- tokenizer_config.json +0 -0
.gitattributes
CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 nemo/*.nemo filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
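The added rule stores tokenizer.json through Git LFS (it is roughly an 18 MB file, per the rename diff below), so a plain checkout may contain an LFS pointer stub rather than the actual tokenizer. A minimal sketch for detecting that case; the helper is a hypothetical illustration, not something shipped in this repo:

```python
# Hypothetical helper, not part of the repo: detect whether a checked-out
# file is still a Git LFS pointer (i.e. `git lfs pull` has not run yet).
# Pointer files begin with the version line shown in the rename diff below.
def is_lfs_pointer(path: str) -> bool:
    with open(path, 'rb') as f:
        head = f.read(100)
    return head.startswith(b'version https://git-lfs.github.com/spec/v1')

if __name__ == '__main__':
    print(is_lfs_pointer('tokenizer.json'))
```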
README.md
CHANGED
@@ -5,7 +5,7 @@ license_link: >-
 https://developer.download.nvidia.com/licenses/nvidia-open-model-license-agreement-june-2024.pdf
 ---
 
-# Minitron
+# Nemotron-4-Minitron-4B-Base
 
 Minitron is a family of small language models (SLMs) obtained by pruning NVIDIA's [Nemotron-4 15B](https://arxiv.org/abs/2402.16819) model. We prune model embedding size, attention heads, and MLP intermediate dimension, following which, we perform continued training with distillation to arrive at the final models.
 
@@ -15,14 +15,12 @@ Minitron models are for research and development only.
 
 ## HuggingFace Quickstart
 
-
+Support for Nemotron models will be added in the upcoming transformers library release. In the meantime, please install the library from source:
 
 ```
-
-cd transformers
-git checkout 63d9cb0
-pip install .
+pip install git+https://github.com/huggingface/transformers
 ```
+
 The following code provides an example of how to load the Minitron-4B model and use it to perform text generation.
 
 ```python
@@ -30,7 +28,7 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
 # Load the tokenizer and model
-model_path = 'nvidia/Minitron-4B-Base'
+model_path = 'nvidia/Nemotron-4-Minitron-4B-Base'
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 
 device = 'cuda'
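For reference, the quickstart fragments touched above assemble into a runnable whole along the following lines. The prompt, dtype, and generation settings are illustrative assumptions, not part of the diff:

```python
# Sketch assembled from the README fragments above; the prompt and
# generation settings are assumptions, not part of the diff.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Load the tokenizer and model
model_path = 'nvidia/Nemotron-4-Minitron-4B-Base'
tokenizer = AutoTokenizer.from_pretrained(model_path)

device = 'cuda'
model = AutoModelForCausalLM.from_pretrained(
    model_path, torch_dtype=torch.bfloat16  # matches "torch_dtype" in config.json
).to(device)

# Encode an example prompt and generate a short continuation
prompt = 'Complete the sentence: large language models are'  # assumed prompt
inputs = tokenizer.encode(prompt, return_tensors='pt').to(device)
outputs = model.generate(inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```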
config.json
CHANGED
@@ -16,11 +16,11 @@
   "num_key_value_heads": 8,
   "norm_eps": 1e-05,
   "rope_theta": 10000,
-  "
-  "rope_scaling": null,
+  "partial_rotary_factor": 0.5,
   "tie_word_embeddings": false,
   "torch_dtype": "bfloat16",
   "transformers_version": "4.32.0.dev0",
   "use_cache": true,
-  "vocab_size": 256000
+  "vocab_size": 256000,
+  "head_dim": 128
 }
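The two added fields make the attention geometry explicit: with "head_dim": 128 and "partial_rotary_factor": 0.5, rotary position embeddings cover the first 64 dimensions of each attention head and leave the rest unrotated. A minimal sketch, assuming a transformers release recent enough to know the Nemotron architecture:

```python
# Minimal sketch, assuming a transformers release with Nemotron support.
from transformers import AutoConfig

config = AutoConfig.from_pretrained('nvidia/Nemotron-4-Minitron-4B-Base')
rotary_dims = int(config.head_dim * config.partial_rotary_factor)
# Per the diff above: 128 * 0.5 = 64 rotary dimensions per attention head.
print(config.head_dim, config.partial_rotary_factor, rotary_dims)
```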
special_tokens_map.json
CHANGED
@@ -1,23 +1 @@
-{
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "</s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
-}
+{}
tokenizer.model → tokenizer.json
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size 
+oid sha256:83d0648daa0467fb02ddef7ff25460321dab2fbb20c280ae0bc1ea8052f7df90
+size 18143149
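Taken together with the emptied special_tokens_map.json, this rename moves the tokenizer from a SentencePiece tokenizer.model to a self-contained fast-tokenizer tokenizer.json, with the special-token definitions carried by the tokenizer files themselves. A hedged sanity check that everything still resolves after loading; the exact token strings printed depend on tokenizer_config.json, whose diff is too large to render below:

```python
# Sketch: verify the converted tokenizer loads as a fast tokenizer and still
# resolves its special tokens after special_tokens_map.json was emptied.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('nvidia/Nemotron-4-Minitron-4B-Base')
print(tokenizer.is_fast)  # True: backed by tokenizer.json
print(tokenizer.bos_token, tokenizer.eos_token, tokenizer.unk_token)
```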
tokenizer_config.json
CHANGED
The diff for this file is too large to render. See raw diff.