Commit 4f97bab (verified) · committed by gabegoodhart · 1 Parent(s): 3b3fee4

Remove <sep> from tokenizer special tokens

## Description

The original `granite-3.1-2b-instruct` model had the same errant `<sep>` token entries, which were removed after the initial upload. With them in place here, the model cannot be converted to GGUF using `convert_hf_to_gguf.py`. Error below:

```sh
...
INFO:hf-to-gguf:Set model parameters
INFO:hf-to-gguf:gguf: context length = 131072
INFO:hf-to-gguf:gguf: embedding length = 2048
INFO:hf-to-gguf:gguf: feed forward length = 8192
INFO:hf-to-gguf:gguf: head count = 32
INFO:hf-to-gguf:gguf: key-value head count = 8
INFO:hf-to-gguf:gguf: rope theta = 5000000.0
INFO:hf-to-gguf:gguf: rms norm epsilon = 1e-05
INFO:hf-to-gguf:gguf: file type = 1
INFO:hf-to-gguf:gguf: (granite) attention_scale = 0.015625
INFO:hf-to-gguf:gguf: (granite) embedding_scale = 12.0
INFO:hf-to-gguf:gguf: (granite) residual_scale = 0.22
INFO:hf-to-gguf:gguf: (granite) logits_scale = 8.0
INFO:hf-to-gguf:Set model tokenizer
Traceback (most recent call last):
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 1569, in set_vocab
    self._set_vocab_sentencepiece()
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 792, in _set_vocab_sentencepiece
    tokens, scores, toktypes = self._create_vocab_sentencepiece()
                               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 809, in _create_vocab_sentencepiece
    raise FileNotFoundError(f"File not found: {tokenizer_path}")
FileNotFoundError: File not found: granite-guardian-3.1-2b/tokenizer.model

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 1572, in set_vocab
    self._set_vocab_llama_hf()
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 884, in _set_vocab_llama_hf
    vocab = gguf.LlamaHfVocab(self.dir_model)
            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/gguf-py/gguf/vocab.py", line 390, in __init__
    raise FileNotFoundError('Cannot find Llama BPE tokenizer')
FileNotFoundError: Cannot find Llama BPE tokenizer

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 5140, in <module>
    main()
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 5134, in main
    model_instance.write()
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 440, in write
    self.prepare_metadata(vocab_only=False)
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 433, in prepare_metadata
    self.set_vocab()
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 1575, in set_vocab
    self._set_vocab_gpt2()
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 728, in _set_vocab_gpt2
    tokens, toktypes, tokpre = self.get_vocab_base()
                               ^^^^^^^^^^^^^^^^^^^^^
  File "/Users/ghart/Projects/github/ggerganov/llama.cpp/convert_hf_to_gguf.py", line 524, in get_vocab_base
    assert max(tokenizer.vocab.values()) < vocab_size
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
AssertionError
```
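
For context, the assertion that ultimately fails (`max(tokenizer.vocab.values()) < vocab_size` in `get_vocab_base`) compares the largest token id in the Hugging Face tokenizer against the model's declared `vocab_size`; the extra `<sep>` entry at id 49155 is what pushes the maximum id out of range. Below is a minimal sketch of the same check, assuming `transformers` is installed and `granite-guardian-3.1-2b/` is a local checkout of this repo (the path is illustrative, not part of the converter):

```python
# Minimal sanity check (not part of convert_hf_to_gguf.py): reproduce the
# comparison behind the AssertionError above.
from transformers import AutoConfig, AutoTokenizer

model_dir = "granite-guardian-3.1-2b"  # local checkout of this repo (example path)

config = AutoConfig.from_pretrained(model_dir)
tokenizer = AutoTokenizer.from_pretrained(model_dir)

max_id = max(tokenizer.get_vocab().values())
print("config vocab_size :", config.vocab_size)
print("max token id      :", max_id)
print("added tokens      :", tokenizer.get_added_vocab())

# With the errant "<sep>" entry (id 49155) still present, max_id is not
# strictly less than vocab_size, so the converter's assertion fails.
# After this commit the largest id drops back to 49154 and the check should pass.
```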

Files changed (3)
  1. added_tokens.json +0 -1
  2. tokenizer.json +1 -10
  3. tokenizer_config.json +1 -9
added_tokens.json CHANGED

```diff
@@ -1,5 +1,4 @@
 {
-  "<sep>": 49155,
   "<|end_of_role|>": 49153,
   "<|start_of_role|>": 49152,
   "<|tool_call|>": 49154
```
tokenizer.json CHANGED

```diff
@@ -200,15 +200,6 @@
       "rstrip": false,
       "normalized": false,
       "special": true
-    },
-    {
-      "id": 49155,
-      "content": "<sep>",
-      "single_word": false,
-      "lstrip": false,
-      "rstrip": false,
-      "normalized": false,
-      "special": true
     }
   ],
   "normalizer": null,
@@ -244960,4 +244951,4 @@
     ]
   ]
  }
-}
+}
```
tokenizer_config.json CHANGED

```diff
@@ -177,14 +177,6 @@
       "rstrip": false,
       "single_word": false,
       "special": true
-    },
-    "49155": {
-      "content": "<sep>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
     }
   },
   "additional_special_tokens": [
@@ -204,4 +196,4 @@
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|end_of_text|>",
   "vocab_size": 49152
-}
+}
```
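
For anyone applying the same cleanup to another checkpoint, a rough sketch of an equivalent programmatic edit is below. It assumes the `"49155"` block in `tokenizer_config.json` sits under the usual `added_tokens_decoder` key and that `<sep>` appears in the top-level `added_tokens` list of `tokenizer.json`; the paths and the choice to rewrite the JSON wholesale (which may reflow whitespace) are illustrative only, whereas this commit touches just the affected lines.

```python
# Illustrative sketch: strip the "<sep>" special token from the three tokenizer files.
import json
from pathlib import Path

model_dir = Path("granite-guardian-3.1-2b")  # example path to a local checkout

# added_tokens.json: drop the "<sep>" mapping
p = model_dir / "added_tokens.json"
added = json.loads(p.read_text())
added.pop("<sep>", None)
p.write_text(json.dumps(added, indent=2) + "\n")

# tokenizer.json: remove the <sep> entry from the added_tokens list
p = model_dir / "tokenizer.json"
tok = json.loads(p.read_text())
tok["added_tokens"] = [t for t in tok.get("added_tokens", []) if t.get("content") != "<sep>"]
p.write_text(json.dumps(tok, ensure_ascii=False, indent=2) + "\n")

# tokenizer_config.json: drop id 49155 from added_tokens_decoder (assumed key name)
p = model_dir / "tokenizer_config.json"
cfg = json.loads(p.read_text())
cfg.get("added_tokens_decoder", {}).pop("49155", None)
p.write_text(json.dumps(cfg, ensure_ascii=False, indent=2) + "\n")
```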