Text Generation
Transformers
PyTorch
longllama
code
custom_code
Eval Results
long_llama_3b_v1_1 / tokenizer_config.json
Szymon Tworkowski · init release · 6711cdb
{"bos_token": "", "eos_token": {"__type": "AddedToken", "content": "</s>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}, "model_max_length": 1000000000000000019884624838656, "tokenizer_class": "LlamaTokenizer", "unk_token": {"__type": "AddedToken", "content": "<unk>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}, "add_bos_token": true, "add_eos_token": false, "pad_token": null, "sp_model_kwargs": {}, "clean_up_tokenization_spaces": {"__type": "AddedToken", "content": "<s>", "lstrip": false, "normalized": true, "rstrip": false, "single_word": false}}