anemll committed · verified
Commit 06e9e65 · Parent(s): f3d2b05

Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
DeepHermes_FFN_PF_lut6_chunk_01of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8cf5a2152a84f58b8964f11a40ec58b1f2080c1d2cb232c4d455ffec5b85c69e
+ size 669194848
DeepHermes_FFN_PF_lut6_chunk_02of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5b4c79d8253d90703e431d264d7143412233b569b3e44e4e1606662f759095d3
+ size 669987695
DeepHermes_FFN_PF_lut6_chunk_03of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ca23d87d410d6395044a810fb31c1a1f85c64d9a5168393501f9dd853caa21d6
+ size 669790438
DeepHermes_FFN_PF_lut6_chunk_04of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f0e59957ebeddc576260f0910b7644d21e477fa90a429b84e65b099e568184f6
+ size 669567176
DeepHermes_FFN_PF_lut6_chunk_05of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:af2e5d14e4bb5ad8d56e604f492682e8bbd3afd60c8f7aba3a5770764181b52d
+ size 670076841
DeepHermes_FFN_PF_lut6_chunk_06of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3bfb49d2d0ce363e1ff3dc78ae6272e9f56305c31c335c800e6c06257bf5919
+ size 670543405
DeepHermes_FFN_PF_lut6_chunk_07of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ecf061786d42fc0aa1bedfabba2029b53a9d25549192017eed115a2da1cd6df
+ size 670496528
DeepHermes_FFN_PF_lut6_chunk_08of08.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0eb9c24fc0fbc7ee386a6a5e159b40c79babc76561c5cbffa67773aa2de558f6
+ size 669493021
DeepHermes_embeddings.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:604b4674a3cab94e50fb0dd8a94118627b225fabdb077a8c4fb8912f4793e214
+ size 807388109
DeepHermes_lm_head_lut6.mlmodelc.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02630d282aacf9147b5a5a242493064f8aac80b665d0b73a5606efc411532de6
+ size 807908333
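
Each `*.mlmodelc.zip` entry above is a Git LFS pointer (`version`, `oid`, `size`) rather than the archive itself; `git lfs pull` fetches the real payload. As an integrity check after downloading, the SHA-256 of each zip should match the `oid` in its pointer, e.g. on macOS with the stock `shasum` tool (use `sha256sum` on Linux):

```bash
# Expected: 8cf5a2152a84f58b8964f11a40ec58b1f2080c1d2cb232c4d455ffec5b85c69e
shasum -a 256 DeepHermes_FFN_PF_lut6_chunk_01of08.mlmodelc.zip
```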
README.md ADDED
@@ -0,0 +1,106 @@
+ ---
+ license: mit
+ tags:
+ - coreml
+ - ANE
+ - DeepSeek
+ - Apple
+ - Apple Neural Engine
+ ---
+ # ANEMLL
+
+ **ANEMLL** (pronounced like "animal") is an open-source project focused on accelerating the porting of Large Language Models (LLMs) to tensor processors, starting with the Apple Neural Engine (ANE).
+
+ The goal is to provide a fully open-source pipeline from model conversion to inference for common LLM architectures running on ANE.
+
+ This enables seamless integration and on-device inference for low-power applications on edge devices, ensuring maximum privacy and security.
+
+ This is critical for autonomous applications, where models run directly on the device without requiring an internet connection.
+
+ ---
+
+ ## License
+
+ ANEMLL is licensed under the [MIT License](https://opensource.org/license/mit).
+ The model is based on Meta's LLaMA 3 and may require a separate license.
+
+ This test model is the DeepHermes-3-Llama-3-8B-Preview (1024 context) model converted for CoreML, released before the official launch of the ANEMLL repository and with minimal documentation. It is intended only for early adopters who requested an early release.
+
+ ---
+
+ ## Requirements
+
+ - **macOS Sequoia** with Apple Neural Engine and 16GB RAM
+ - **CoreML Tools** and **HuggingFace Transformers** libraries
+ - **Python 3.9**
+
+ `chat.py` provides a sample inference script.
+ `chat_full.py` provides a sample inference script with history and conversation management.
+
+ **Installation**
+
+ 1. Download the model from Hugging Face:
+ ```bash
+ # Install required tools
+ pip install huggingface_hub
+
+ # Install Git LFS (Large File Support)
+ # macOS with Homebrew:
+ brew install git-lfs
+ # Or Ubuntu/Debian:
+ # sudo apt-get install git-lfs
+
+ # Initialize Git LFS
+ git lfs install
+
+ # Clone the repository with model files
+ git clone https://huggingface.co/anemll/anemll-DeepHermes-3-Llama-3-8B-Preview-ctx1024_0.1.1
+ ```
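+
+ Alternatively, the same files can be fetched without Git using the `huggingface_hub` Python API installed above. A minimal sketch (repo id taken from the clone URL; `local_dir` is an arbitrary choice):
+ ```python
+ from huggingface_hub import snapshot_download
+
+ snapshot_download(
+     repo_id="anemll/anemll-DeepHermes-3-Llama-3-8B-Preview-ctx1024_0.1.1",
+     local_dir="anemll-DeepHermes-3-Llama-3-8B-Preview-ctx1024_0.1.1",
+ )
+ ```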
+
+ 2. Extract model files:
+ ```bash
+ # Navigate to cloned directory
+ cd anemll-DeepHermes-3-Llama-3-8B-Preview-ctx1024_0.1.1
+
+ # Pull LFS files (model weights)
+ git lfs pull
+
+ # Extract CoreML model files
+ find . -type f -name "*.zip" -exec unzip {} \;
+ ```
+
+ 3. Install dependencies:
+ ```bash
+ pip install coremltools transformers
+ ```
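+
+ To confirm the dependencies are importable before running the chat scripts (a quick sanity check, not part of the original steps):
+ ```python
+ import coremltools as ct
+ import transformers
+
+ print("coremltools:", ct.__version__)
+ print("transformers:", transformers.__version__)
+ ```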
+
+ **Coremltools:**
+
+ See the coremltools installation guide at https://coremltools.readme.io/v4.0/docs/installation
+
+ **How to Run**
+
+ 1. Basic chat interface:
+ ```bash
+ python chat.py --meta ./meta.yaml
+ ```
+
+ 2. Full conversation mode with history:
+ ```bash
+ python chat_full.py --meta ./meta.yaml
+ ```
+
+ > Note: The first time the model loads, macOS will take some time to place it on the device.
+ > Subsequent loads will be instantaneous.
+ > Use Ctrl-D to exit, Ctrl-C to interrupt inference.
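+
+ `meta.yaml` is the convenient entry point, but `chat.py` also accepts explicit model paths. A sketch using the file names in this repository after unzipping (flags as defined by `chat.py`'s argument parser; the remaining FFN chunks are discovered automatically from the first chunk's name):
+ ```bash
+ python chat.py \
+   --d ./anemll-DeepHermes-3-Llama-3-8B-Preview-ctx1024_0.1.1 \
+   --embed DeepHermes_embeddings \
+   --lmhead DeepHermes_lm_head_lut6 \
+   --ffn DeepHermes_FFN_PF_lut6_chunk_01of08 \
+   --prompt "who are you?"
+ ```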
+
+ **More Info**
+ Please check the following links for updates:
+
+ * [GitHub](https://github.com/anemll)
+ * [Hugging Face Models](https://huggingface.co/anemll)
+ * [Twitter/X](https://x.com/anemll)
+ * [Website](https://anemll.com)
chat.py ADDED
@@ -0,0 +1,819 @@
+ #!/usr/bin/env python3
+ # chat.py
+ # Copyright (c) 2025 Anemll
+ # Licensed under the MIT License
+
+ import argparse
+ import os
+ import re
+ import glob
+ from pathlib import Path
+ import coremltools as ct
+ from transformers import AutoTokenizer
+ import torch
+ import torch.nn.functional as F
+ import numpy as np
+ import queue
+ import threading
+ import time
+ import yaml
+ import sys
+
+ # ANSI color codes
+ LIGHT_BLUE = "\033[94m"
+ DARK_BLUE = "\033[34m"
+ LIGHT_GREEN = "\033[92m"
+ RESET_COLOR = "\033[0m"
+
+ # Maximum tokens to generate during warmup
+ WARMUP_TOKEN_LIMIT = 10
+
+ class TokenPrinter:
+     """Handles background printing of generated tokens."""
+     def __init__(self, tokenizer):
+         self.tokenizer = tokenizer
+         self.token_queue = queue.Queue()
+         self.stop_event = threading.Event()
+         self.thread = None
+         self.buffer = ""
+         self.lock = threading.Lock()
+         self.thinking = True  # Track if we're still in thinking mode
+         self.decoding_buffer = []  # Buffer for token IDs
+         # Token counting and timing
+         self.start_time = time.time()
+         self.token_count = 0
+         self.start()
+
+     def start(self):
+         """Start the printer thread."""
+         if self.thread is None:
+             self.thread = threading.Thread(target=self._print_worker)
+             self.thread.daemon = True
+             self.thread.start()
+
+     def add_token(self, token_id):
+         """Add a token to the print queue."""
+         if not self.stop_event.is_set():
+             self.token_queue.put(token_id)
+             self.token_count += 1
+
+     def drain_buffer(self):
+         """Decode token IDs from decoding_buffer in the main thread."""
+         with self.lock:  # the worker thread appends to decoding_buffer concurrently
+             if not self.decoding_buffer:
+                 return
+             # Decode all buffered tokens at once in the main thread
+             token_str = self.tokenizer.decode(self.decoding_buffer)
+             self.decoding_buffer.clear()
+
+         # Color-handling logic
+         if self.thinking and "</think>" in token_str:
+             self.thinking = False
+             parts = token_str.split("</think>")
+             if len(parts) > 0:
+                 print(parts[0] + "</think>", end='', flush=True)
+             if len(parts) > 1:
+                 print(LIGHT_BLUE + parts[1], end='', flush=True)
+         else:
+             if not self.thinking:
+                 print(LIGHT_BLUE + token_str, end='', flush=True)
+             else:
+                 print(token_str, end='', flush=True)
+
+     def _print_worker(self):
+         """Worker thread that takes token_ids from the queue."""
+         while not self.stop_event.is_set():
+             try:
+                 token_id = self.token_queue.get(timeout=0.01)
+                 with self.lock:
+                     self.decoding_buffer.append(token_id)
+                 self.token_queue.task_done()
+             except queue.Empty:
+                 continue
+             except Exception as e:
+                 print(f"\nError: Token printer error: {str(e)}")
+                 break
+
+     def stop(self):
+         """Stop the printer thread."""
+         if self.thread and self.thread.is_alive():
+             self.stop_event.set()
+             try:
+                 self.thread.join(timeout=1.0)
+             except Exception:
+                 pass
+             # Calculate and print tokens/s with shorter format in blue
+             elapsed = time.time() - self.start_time
+             if elapsed > 0 and self.token_count > 0:
+                 tokens_per_sec = self.token_count / elapsed
+                 print(f"\n{DARK_BLUE}{tokens_per_sec:.1f} t/s{RESET_COLOR}")
+             else:
+                 print(RESET_COLOR)  # Reset color at the end
+         return self.buffer
+
+ def parse_model_path(path):
+     """Parse model path and return full path with .mlmodelc or .mlpackage extension."""
+     path = Path(path)
+
+     # If path exists exactly as specified, return it
+     if path.exists():
+         return str(path)
+
+     # Try with both extensions
+     candidates = [
+         path,  # Original path
+         path.with_suffix('.mlmodelc'),  # With .mlmodelc
+         path.with_suffix('.mlpackage'),  # With .mlpackage
+         Path(str(path) + '.mlmodelc'),  # Handle case where extension is included
+         Path(str(path) + '.mlpackage')
+     ]
+
+     # Try all possible paths
+     for candidate in candidates:
+         if candidate.exists():
+             print(f"Found model at: {candidate}")
+             return str(candidate)
+
+     # If we get here, no valid path was found
+     print("\nError: Model not found. Tried the following paths:")
+     for candidate in candidates:
+         print(f"  {candidate}")
+     raise FileNotFoundError(f"Model not found: {path}")
+
+ def parse_ffn_filename(path):
+     """Parse FFN model filename to extract chunk information."""
+     path = Path(path)
+     pattern = r'FFN_PF.*_chunk_(\d+)of(\d+)'
+     match = re.search(pattern, path.name)
+
+     if match:
+         current_chunk = int(match.group(1))
+         total_chunks = int(match.group(2))
+         return current_chunk, total_chunks
+     return None, None
+
+ def find_all_chunks(base_path):
+     """Find all chunk files matching the base FFN path pattern."""
+     path = Path(base_path)
+     pattern = re.sub(r'_chunk_\d+of\d+', '_chunk_*', str(path))
+     return sorted(glob.glob(pattern))
+
+ def load_model(path, function_name=None):
+     """Load a CoreML model, handling both .mlmodelc and .mlpackage formats."""
+     path = Path(path)
+     compute_unit = ct.ComputeUnit.CPU_AND_NE
+
+     try:
+         if path.suffix == '.mlmodelc':
+             # For compiled models (.mlmodelc), use CompiledMLModel
+             if function_name:
+                 return ct.models.CompiledMLModel(str(path), compute_unit, function_name=function_name)
+             else:
+                 return ct.models.CompiledMLModel(str(path), compute_unit)
+         else:
+             # For packages (.mlpackage)
+             if function_name:
+                 return ct.models.MLModel(str(path), function_name=function_name)
+             else:
+                 return ct.models.MLModel(str(path))
+
+     except RuntimeError as e:
+         if "valid manifest does not exist" in str(e):
+             print(f"\nError: Could not load compiled model at {path}")
+             print("This might be because:")
+             print("1. The model is not properly compiled")
+             print("2. The model was compiled for a different OS version")
+             print("3. The model needs to be recompiled")
+             print("\nTry using the .mlpackage version instead, or recompile the model.")
+         raise
+
+ def load_metadata(model, args):
+     # Extract metadata and config parameters
+     metadata = {}
+     if hasattr(model, 'user_defined_metadata'):
+         meta = model.user_defined_metadata
+
+         # Extract key parameters with defaults
+         metadata['context_length'] = int(meta.get('com.anemll.context_length', 512))
+         metadata['state_length'] = int(meta.get('com.anemll.state_length', metadata['context_length']))
+         metadata['batch_size'] = int(meta.get('com.anemll.batch_size', 64))
+         metadata['lut_bits'] = int(meta.get('com.anemll.lut_bits', 0))
+         metadata['num_chunks'] = int(meta.get('com.anemll.num_chunks', 1))
+
+         print("\nExtracted Parameters:")
+         print(f"  Context Length: {metadata['context_length']}")
+         print(f"  State Length: {metadata['state_length']}")
+         print(f"  Prefill Batch Size: {metadata['batch_size']}")
+         print(f"  LUT Bits: {metadata['lut_bits']}")
+         print(f"  Number of Chunks: {metadata['num_chunks']}")
+
+         # Print model info
+         print("\nModel Info:")
+         if 'com.anemll.info' in meta:
+             print(f"  {meta['com.anemll.info']}")
+         if 'com.github.apple.coremltools.version' in meta:
+             print(f"  CoreML Tools: {meta['com.github.apple.coremltools.version']}")
+
+         # Print model input/output shapes
+         print("\nModel Shapes:")
+         if hasattr(model, 'input_description'):
+             print("  Inputs:")
+             for name, desc in model.input_description.items():
+                 print(f"    {name}: {desc}")
+         if hasattr(model, 'output_description'):
+             print("  Outputs:")
+             for name, desc in model.output_description.items():
+                 print(f"    {name}: {desc}")
+     else:
+         print("\nWarning: No metadata found in model")
+
+         # Check if model directory name contains context length pattern (ctxXXX)
+         ctx_len = 512
+         if args.context_length is None:
+             ctx_match = re.search(r'ctx(\d+)', str(args.d))
+             if ctx_match:
+                 ctx_len0 = int(ctx_match.group(1))
+                 if 512 <= ctx_len0 <= 8096:
+                     ctx_len = ctx_len0
+                     print(f"\nDetected context length {ctx_len} from directory name")
+             else:
+                 print(f"\nWarning: No context length found in directory name {args.d}; using default {ctx_len}")
+         else:
+             ctx_len = args.context_length
+
+         # Use defaults
+         metadata['context_length'] = ctx_len
+         metadata['state_length'] = ctx_len
+         metadata['batch_size'] = 64
+         metadata['lut_bits'] = 4
+         metadata['num_chunks'] = 4
+         print("\nUsing default parameters:")
+         print(f"  Context Length: {metadata['context_length']}")
+         print(f"  State Length: {metadata['state_length']}")
+         print(f"  Prefill Batch Size: {metadata['batch_size']}")
+         print(f"  LUT Bits: {metadata['lut_bits']}")
+         print(f"  Number of Chunks: {metadata['num_chunks']}")
+     return metadata
+
+ def load_models(args, metadata):
+     """Load all required models and extract metadata."""
+     print("\nLoading models...")
+
+     try:
+         # Load embeddings model
+         print("\nLoading embeddings model...")
+         embed_path = parse_model_path(args.embed)
+         print(f"Loading from: {embed_path}")
+         embed_model = load_model(embed_path)
+         print("Embeddings model loaded successfully")
+         metadata = load_metadata(embed_model, args)
+
+         # Load LM head model
+         print("\nLoading LM head model...")
+         lmhead_path = parse_model_path(args.lmhead)
+         print(f"Loading from: {lmhead_path}")
+         lmhead_model = load_model(lmhead_path)
+         print("LM head model loaded successfully")
+
+         # Parse FFN path and find chunks if needed
+         print("\nLoading FFN+PREFILL model(s)...")
+         ffn_path = parse_model_path(args.ffn)
+         chunk_no, total_chunks = parse_ffn_filename(ffn_path)
+
+         ffn_models = []
+         if chunk_no and total_chunks:
+             print(f"\nDetected chunked FFN+PREFILL model ({total_chunks} chunks)")
+             # Find and load all chunks
+             chunk_paths = find_all_chunks(ffn_path)
+             if len(chunk_paths) != total_chunks:
+                 raise ValueError(f"Found {len(chunk_paths)} chunks but filename indicates {total_chunks} chunks")
+
+             for chunk_path in chunk_paths:
+                 print(f"\nLoading FFN+PREFILL chunk: {Path(chunk_path).name}")
+                 try:
+                     # For chunked models, we need both infer and prefill functions
+                     ffn_models.append({
+                         'infer': load_model(chunk_path, function_name='infer'),
+                         'prefill': load_model(chunk_path, function_name='prefill')
+                     })
+                     print("Chunk loaded successfully")
+                 except Exception as e:
+                     print(f"Error loading chunk {chunk_path}: {str(e)}")
+                     raise
+             metadata = load_metadata(ffn_models[0], args)
+
+         else:
+             print("\nLoading single FFN model...")
+             ffn_models.append(load_model(ffn_path))
+             print("FFN model loaded successfully")
+
+         return embed_model, ffn_models, lmhead_model, metadata
+
+     except Exception as e:
+         print(f"\nError loading models: {str(e)}")
+         print("\nPlease ensure all model files exist and are accessible.")
+         print("Expected files:")
+         print(f"  Embeddings: {args.embed}")
+         print(f"  LM Head: {args.lmhead}")
+         print(f"  FFN: {args.ffn}")
+         raise
+
+ def initialize_tokenizer(model_path=None):
+     """Initialize and configure the tokenizer."""
+     try:
+         tokenizer = AutoTokenizer.from_pretrained(
+             str(model_path),
+             use_fast=False,
+             trust_remote_code=True
+         )
+
+         print("\nTokenizer Configuration:")
+         print(f"Tokenizer type: {type(tokenizer)}")
+         print(f"Tokenizer name: {tokenizer.__class__.__name__}")
+         print(f"Vocabulary size: {len(tokenizer)}")
+         print(f"Model max length: {tokenizer.model_max_length}")
+
+         if tokenizer.pad_token is None:
+             tokenizer.pad_token = tokenizer.eos_token
+             tokenizer.pad_token_id = tokenizer.eos_token_id
+             print("Set PAD token to EOS token")
+
+         tokenizer.padding_side = "left"
+
+         print("\nSpecial Tokens:")
+         print(f"PAD token: '{tokenizer.pad_token}' (ID: {tokenizer.pad_token_id})")
+         print(f"EOS token: '{tokenizer.eos_token}' (ID: {tokenizer.eos_token_id})")
+         print(f"BOS token: '{tokenizer.bos_token}' (ID: {tokenizer.bos_token_id})")
+         print(f"UNK token: '{tokenizer.unk_token}' (ID: {tokenizer.unk_token_id})")
+
+         return tokenizer
+
+     except Exception as e:
+         print(f"\nError: Failed to load tokenizer from {model_path}")
+         print(f"Error details: {str(e)}")
+         print(f"Error type: {type(e)}")
+         print("\nThis code requires a Llama 3.2 model for chat template functionality.")
+         print("Please provide the path to a Llama 3.2 model directory.")
+         import traceback
+         traceback.print_exc()
+         raise
+
+ def make_causal_mask(length, start):
+     """Create causal attention mask."""
+     mask = np.full((1, 1, length, length), -np.inf, dtype=np.float16)
+     row_indices = np.arange(length).reshape(length, 1)
+     col_indices = np.arange(length).reshape(1, length)
+     mask[:, :, col_indices <= (row_indices + start)] = 0
+     return mask
+
+ def run_prefill(embed_model, ffn_models, input_ids, context_pos, context_length, batch_size=64, state=None):
+     """Run prefill on the input sequence."""
+     # Create causal mask
+     causal_mask = make_causal_mask(context_length, 0)
+     causal_mask = torch.tensor(causal_mask, dtype=torch.float16)
+
+     # Process in batches
+     batch_pos = 0
+     while batch_pos < context_pos:
+         batch_end = min(batch_pos + batch_size, context_pos)
+         current_batch_size = batch_end - batch_pos
+
+         # Get current batch
+         batch_input = input_ids[:, batch_pos:batch_end]
+
+         # Always pad to full batch size for prefill
+         batch_input = F.pad(
+             batch_input,
+             (0, batch_size - current_batch_size),
+             value=0
+         )
+
+         # Generate position IDs for the full batch size
+         position_ids = torch.arange(batch_size, dtype=torch.int32)  # Always use full batch size
+         batch_causal_mask = causal_mask[:, :, :batch_size, :]  # Use full batch size
+
+         # Run embeddings
+         hidden_states = torch.from_numpy(
+             embed_model.predict({'input_ids': batch_input.numpy()})['hidden_states']
+         )
+
+         # Run through FFN chunks with state
+         for ffn_model in ffn_models:
+             if isinstance(ffn_model, dict):
+                 inputs = {
+                     'hidden_states': hidden_states.numpy(),  # [1, 64, hidden_size]
+                     'position_ids': position_ids.numpy(),  # [64]
+                     'causal_mask': batch_causal_mask.numpy(),  # [1, 1, 64, context_length]
+                     'current_pos': np.array([batch_pos], dtype=np.int32)  # [1]
+                 }
+                 output = ffn_model['prefill'].predict(inputs, state)
+                 hidden_states = torch.from_numpy(output['output_hidden_states'])
+
+         batch_pos = batch_end
+
+     return torch.tensor([context_pos], dtype=torch.int32)
+
+ def generate_next_token(embed_model, ffn_models, lmhead_model, input_ids, pos, context_length, state=None, temperature=0.0):
+     """Generate the next token."""
+     # Get current token
+     current_token = input_ids[:, pos-1:pos]  # [1, 1]
+
+     # Run embeddings
+     hidden_states = torch.from_numpy(
+         embed_model.predict({'input_ids': current_token.numpy()})['hidden_states']
+     )  # [1, 1, hidden_size]
+
+     # Create masks
+     update_mask = torch.zeros((1, 1, context_length, 1), dtype=torch.float16)
+     update_mask[0, 0, pos-1, 0] = 1.0
+     position_ids = torch.tensor([pos-1], dtype=torch.int32)  # [1]
+     causal_mask = make_causal_mask(context_length, 0)
+     causal_mask = torch.tensor(causal_mask[:, :, pos-1:pos, :], dtype=torch.float16)  # [1, 1, 1, context_length]
+
+     # Run through FFN chunks with state
+     for ffn_model in ffn_models:
+         if isinstance(ffn_model, dict):
+             inputs = {
+                 'hidden_states': hidden_states.numpy(),
+                 'update_mask': update_mask.numpy(),
+                 'position_ids': position_ids.numpy(),
+                 'causal_mask': causal_mask.numpy(),
+                 'current_pos': position_ids.numpy()
+             }
+             output = ffn_model['infer'].predict(inputs, state)
+             hidden_states = torch.from_numpy(output['output_hidden_states'])
+
+     # Run LM head
+     lm_output = lmhead_model.predict({'hidden_states': hidden_states.numpy()})
+
+     # Combine logits1-8 if they exist
+     if 'logits1' in lm_output:
+         # Concatenate all logits parts
+         logits_parts = []
+         for i in range(1, 9):
+             key = f'logits{i}'
+             if key in lm_output:
+                 logits_parts.append(torch.from_numpy(lm_output[key]))
+         logits = torch.cat(logits_parts, dim=-1)  # Concatenate along vocab dimension
+     else:
+         # Try output_logits as fallback
+         logits = torch.from_numpy(lm_output['output_logits'])
+
+     # Apply temperature and sample
+     if temperature > 0:
+         logits = logits / temperature
+         probs = F.softmax(logits[0, -1, :], dim=-1)
+         next_token = torch.multinomial(probs, num_samples=1).item()
+     else:
+         next_token = torch.argmax(logits[0, -1, :]).item()
+
+     return next_token
+
+ def create_unified_state(ffn_models, context_length):
+     """Create unified KV cache state for transformer."""
+     if isinstance(ffn_models[0], dict):
+         # Use first FFN model's prefill function to create state
+         state = ffn_models[0]['prefill'].make_state()
+         print(f"\nCreated unified transformer state for {len(ffn_models)} chunks")
+         return state
+     else:
+         state = ffn_models[0].make_state()
+         print("\nCreated unified transformer state")
+         return state
+
+ def chat_loop(embed_model, ffn_models, lmhead_model, tokenizer, metadata, state, auto_prompt=None, warmup=False):
+     """Interactive chat loop."""
+     context_length = metadata.get('context_length')
+     batch_size = metadata.get('batch_size', 64)
+
+     if not warmup:
+         print(f"\nUsing context length: {context_length}")
+         print("\nStarting chat session. Press Ctrl+D to exit.")
+         print("Type your message and press Enter to chat.")
+
+     # Check if tokenizer has a chat template and if it works
+     has_chat_template = False
+     try:
+         # Test if chat template works
+         test_messages = [{"role": "user", "content": "test"}]
+         tokenizer.apply_chat_template(test_messages, return_tensors="pt")
+         has_chat_template = True
+         if not warmup:
+             print("\nUsing chat template for prompts")
+     except Exception:
+         if not warmup:
+             print("\nUsing manual formatting for prompts")
+
+     conversation = []
+
+     try:
+         while True:
+             try:
+                 if not warmup:
+                     print(f"\n{LIGHT_GREEN}You:{RESET_COLOR}", end=' ', flush=True)
+                 if auto_prompt is not None:
+                     user_input = auto_prompt
+                     if not warmup:
+                         print(user_input)
+                 else:
+                     user_input = input().strip()
+             except EOFError:
+                 if not warmup:
+                     print("\nExiting chat...")
+                 break
+
+             if not user_input:
+                 continue
+
+             # Format prompt based on tokenizer capabilities
+             if has_chat_template:
+                 messages = [{"role": "user", "content": user_input}]
+                 input_ids = tokenizer.apply_chat_template(
+                     messages,
+                     return_tensors="pt",
+                     add_generation_prompt=True
+                 ).to(torch.int32)
+             else:
+                 # Manual formatting for Llama models without chat template
+                 formatted_prompt = f"[INST] {user_input} [/INST]"
+                 input_ids = tokenizer(
+                     formatted_prompt,
+                     return_tensors="pt",
+                     add_special_tokens=True
+                 ).input_ids.to(torch.int32)
+
+             context_pos = input_ids.size(1)
+
+             if not warmup:
+                 print(f"\n{LIGHT_BLUE}Assistant:{RESET_COLOR}", end=' ', flush=True)
+
+             # Initialize token printer
+             token_printer = TokenPrinter(tokenizer)
+             tokens_generated = 0  # Track number of tokens
+
+             try:
+                 # Start prefill timing
+                 prefill_start = time.time()
+
+                 # Run prefill with state
+                 current_pos = run_prefill(
+                     embed_model,
+                     ffn_models,
+                     input_ids,
+                     context_pos,
+                     context_length,
+                     batch_size,
+                     state
+                 )
+
+                 # Calculate prefill timing
+                 prefill_time = time.time() - prefill_start
+                 prefill_tokens = context_pos  # Number of tokens in input
+                 prefill_tokens_per_sec = prefill_tokens / prefill_time if prefill_time > 0 else 0
+
+                 # Generation loop with state
+                 pos = context_pos
+                 inference_start = time.time()
+                 inference_tokens = 0
+
+                 while pos < context_length - 1:
+                     # Generate next token
+                     next_token = generate_next_token(
+                         embed_model,
+                         ffn_models,
+                         lmhead_model,
+                         input_ids,
+                         pos,
+                         context_length,
+                         state
+                     )
+
+                     # Add token to sequence
+                     if pos < input_ids.size(1):
+                         input_ids[0, pos] = next_token
+                     else:
+                         input_ids = torch.cat([
+                             input_ids,
+                             torch.tensor([[next_token]], dtype=torch.int32)
+                         ], dim=1)
+
+                     # Add to printer only if not in warmup
+                     if not warmup:
+                         token_printer.add_token(next_token)
+                         token_printer.drain_buffer()
+
+                     pos += 1
+                     tokens_generated += 1
+                     inference_tokens += 1
+
+                     # Check limits
+                     if warmup and tokens_generated >= WARMUP_TOKEN_LIMIT:
+                         break
+
+                     if next_token == tokenizer.eos_token_id:
+                         break
+
+                 # Calculate inference timing
+                 inference_time = time.time() - inference_start
+                 inference_tokens_per_sec = inference_tokens / inference_time if inference_time > 0 else 0
+
+                 # Get final response and add to conversation
+                 if not warmup:
+                     response = token_printer.stop()
+                     # Print timing stats
+                     prefill_ms = prefill_time * 1000  # Convert to milliseconds
+                     print(f"\nPrefill: {prefill_ms:.1f}ms ({prefill_tokens_per_sec:.1f} t/s)")
+                     print(f"Inference: {inference_tokens_per_sec:.1f} t/s")
+                     print(f"Total: Generated {tokens_generated} tokens in {prefill_time + inference_time:.2f}s")
+                     conversation.append({"role": "assistant", "content": response})
+                 else:
+                     token_printer.stop()  # Clean up without printing stats
+
+                 # Exit after one response in auto_prompt mode
+                 if auto_prompt is not None:
+                     break
+
+             except KeyboardInterrupt:
+                 print("\nGeneration interrupted")
+                 token_printer.stop()
+                 continue
+
+     except Exception as e:
+         print(f"\nError in chat loop: {str(e)}")
+         import traceback
+         traceback.print_exc()
+
+ def parse_args():
+     parser = argparse.ArgumentParser(description='Chat with CoreML LLaMA (c) 2025 Anemll')
+
+     # Add meta.yaml option
+     parser.add_argument('--meta', type=str, help='Path to meta.yaml to load all parameters')
+
+     # Model paths
+     parser.add_argument('--d', '--dir', type=str, default='.',
+                         help='Directory containing model files (default: current directory)')
+     parser.add_argument('--embed', type=str, required=False,
+                         help='Path to embeddings model (relative to --dir)')
+     parser.add_argument('--ffn', type=str, required=False,
+                         help='Path to FFN model (can be chunked, relative to --dir)')
+     parser.add_argument('--lmhead', type=str, required=False,
+                         help='Path to LM head model (relative to --dir)')
+     parser.add_argument('--tokenizer', type=str, required=False,
+                         help='Path to tokenizer')
+
+     # Add new argument for auto-generation
+     parser.add_argument('--prompt', type=str,
+                         help='If specified, run once with this prompt and exit')
+
+     # Model configuration
+     parser.add_argument('--context-length', type=int,
+                         help='Context length for the model (default: 512); if not provided, it is detected from a ctxNUMBER pattern in the model directory name')
+
+     args = parser.parse_args()
+
+     # If meta.yaml is provided, load parameters from it
+     if args.meta:
+         try:
+             with open(args.meta, 'r') as f:
+                 meta = yaml.safe_load(f)
+             params = meta['model_info']['parameters']
+
+             # Set model directory to meta.yaml directory if not specified
+             if not args.d or args.d == '.':
+                 args.d = str(Path(args.meta).parent)
+
+             # Build model paths based on parameters
+             prefix = params.get('model_prefix', 'llama')  # Default to 'llama' if not specified
+             lut_ffn = f"_lut{params['lut_ffn']}" if params['lut_ffn'] != 'none' else ''
+             lut_lmhead = f"_lut{params['lut_lmhead']}" if params['lut_lmhead'] != 'none' else ''
+             num_chunks = int(params['num_chunks'])
+
+             # Set model paths if not specified
+             if not args.embed:
+                 args.embed = f'{prefix}_embeddings'
+             if not args.lmhead:
+                 args.lmhead = f'{prefix}_lm_head{lut_lmhead}'
+             if not args.ffn:
+                 args.ffn = f'{prefix}_FFN_PF{lut_ffn}_chunk_01of{num_chunks:02d}'
+             if not args.tokenizer:
+                 args.tokenizer = args.d
+
+             # Set other parameters
+             args.context_length = int(params['context_length'])
+             args.batch_size = int(params['batch_size'])
+             args.num_chunks = num_chunks
+
+             print(f"\nLoaded parameters from {args.meta}:")
+             print(f"  Context Length: {args.context_length}")
+             print(f"  Batch Size: {args.batch_size}")
+             print(f"  Num Chunks: {args.num_chunks}")
+             print(f"  Models Directory: {args.d}")
+             print(f"  Embeddings: {args.embed}")
+             print(f"  LM Head: {args.lmhead}")
+             print(f"  FFN: {args.ffn}")
+
+         except Exception as e:
+             print(f"\nError loading meta.yaml: {str(e)}")
+             sys.exit(1)
+
+     return args
+
+ def main():
+     args = parse_args()
+
+     # Convert directory to absolute path
+     model_dir = Path(args.d).resolve()
+     if not model_dir.exists():
+         print(f"\nError: Model directory not found: {model_dir}")
+         return 1
+
+     print(f"\nUsing model directory: {model_dir}")
+     print(f"Context length: {args.context_length}")
+
+     try:
+         # Update paths to be relative to model directory
+         args.embed = str(model_dir / args.embed)
+         args.ffn = str(model_dir / args.ffn)
+         args.lmhead = str(model_dir / args.lmhead)
+
+         # Handle tokenizer path separately since it's not relative to model_dir
+         if args.tokenizer is None:
+             args.tokenizer = str(model_dir)
+
+         if not Path(args.tokenizer).exists():
+             print(f"\nError: Tokenizer directory not found: {args.tokenizer}")
+             return 1
+
+         args.tokenizer = str(Path(args.tokenizer).resolve())  # Convert to absolute path
+         print(f"Using tokenizer path: {args.tokenizer}")
+
+         metadata = {}
+         # Load models and extract metadata
+         embed_model, ffn_models, lmhead_model, metadata = load_models(args, metadata)
+
+         print(f"\nMetadata before context-length override: {metadata}")
+
+         # Override context length from command line if provided
+         if args.context_length is not None:
+             metadata['context_length'] = args.context_length
+             metadata['state_length'] = args.context_length  # Also update state_length
+             print(f"\nOverriding context length from command line: {args.context_length}")
+
+         print(f"\nMetadata after load_models: {metadata}")
+
+         # Load tokenizer with resolved path
+         tokenizer = initialize_tokenizer(args.tokenizer)
+         if tokenizer is None:
+             raise RuntimeError("Failed to initialize tokenizer")
+
+         # Create unified state once
+         state = create_unified_state(ffn_models, metadata['context_length'])
+
+         # Warmup runs to avoid Python GIL issues with CoreML
+         for i in range(2):
+             chat_loop(
+                 embed_model=embed_model,
+                 ffn_models=ffn_models,
+                 lmhead_model=lmhead_model,
+                 tokenizer=tokenizer,
+                 metadata=metadata,
+                 state=state,
+                 warmup=True,
+                 auto_prompt="who are you?"
+             )
+
+         # Main run
+         chat_loop(
+             embed_model=embed_model,
+             ffn_models=ffn_models,
+             lmhead_model=lmhead_model,
+             tokenizer=tokenizer,
+             metadata=metadata,
+             state=state,
+             warmup=False,
+             auto_prompt=args.prompt
+         )
+
+     except Exception as e:
+         print(f"\nError: {str(e)}")
+         import traceback
+         traceback.print_exc()
+         return 1
+
+     return 0
+
+ if __name__ == "__main__":
+     sys.exit(main())
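
Both scripts are driven by the `meta.yaml` read in `parse_args()` above, which expects a `model_info.parameters` mapping with `model_prefix`, `lut_ffn`, `lut_lmhead`, `num_chunks`, `context_length`, and `batch_size`. A hypothetical `meta.yaml` consistent with this repository's file names (illustrative only, not the shipped file; `batch_size: 64` is simply the code's default):

```yaml
model_info:
  parameters:
    model_prefix: DeepHermes   # expands to DeepHermes_embeddings, DeepHermes_lm_head_lut6, ...
    lut_ffn: 6                 # 'none' disables the _lutN suffix
    lut_lmhead: 6
    num_chunks: 8              # matches DeepHermes_FFN_PF_lut6_chunk_01of08 ... 08of08
    context_length: 1024
    batch_size: 64
```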
chat_full.py ADDED
@@ -0,0 +1,854 @@
+ #!/usr/bin/env python3
+ # chat_full.py
+ # Copyright (c) 2025 Anemll
+ # Licensed under the MIT License
+
+ import argparse
+ import os
+ import re
+ import glob
+ from pathlib import Path
+ import coremltools as ct
+ from transformers import AutoTokenizer
+ import torch
+ import torch.nn.functional as F
+ import numpy as np
+ import queue
+ import threading
+ import time
+ import yaml
+ import sys
+
+ # ANSI color codes
+ LIGHT_BLUE = "\033[94m"
+ DARK_BLUE = "\033[34m"
+ LIGHT_GREEN = "\033[92m"
+ RESET_COLOR = "\033[0m"
+
+ # Maximum tokens to generate during warmup
+ WARMUP_TOKEN_LIMIT = 10
+
+ class TokenPrinter:
+     """Handles background printing of generated tokens."""
+     def __init__(self, tokenizer):
+         self.tokenizer = tokenizer
+         self.token_queue = queue.Queue()
+         self.stop_event = threading.Event()
+         self.thread = None
+         self.buffer = ""
+         self.lock = threading.Lock()
+         self.thinking = True  # Track if we're still in thinking mode
+         self.decoding_buffer = []  # Buffer for token IDs
+         # Timing and stats tracking
+         self.start_time = time.time()
+         self.token_count = 0
+         self.prefill_time = 0
+         self.inference_time = 0
+         self.context_pos = 0
+         self.start()
+
+     def start(self):
+         """Start the printer thread."""
+         if self.thread is None:
+             self.thread = threading.Thread(target=self._print_worker)
+             self.thread.daemon = True
+             self.thread.start()
+
+     def add_token(self, token_id):
+         """Add a token to the print queue."""
+         if not self.stop_event.is_set():
+             self.token_queue.put(token_id)
+             self.token_count += 1
+
+     def drain_buffer(self):
+         """Decode token IDs from decoding_buffer in the main thread."""
+         with self.lock:  # the worker thread appends to decoding_buffer concurrently
+             if not self.decoding_buffer:
+                 return
+             # Decode all buffered tokens at once in the main thread
+             token_str = self.tokenizer.decode(self.decoding_buffer)
+             self.decoding_buffer.clear()
+
+         # Color-handling logic
+         if self.thinking and "</think>" in token_str:
+             self.thinking = False
+             parts = token_str.split("</think>")
+             if len(parts) > 0:
+                 print(parts[0] + "</think>", end='', flush=True)
+             if len(parts) > 1:
+                 print(LIGHT_BLUE + parts[1], end='', flush=True)
+         else:
+             if not self.thinking:
+                 print(LIGHT_BLUE + token_str, end='', flush=True)
+             else:
+                 print(token_str, end='', flush=True)
+
+     def _print_worker(self):
+         """Worker thread that takes token_ids from the queue."""
+         while not self.stop_event.is_set():
+             try:
+                 token_id = self.token_queue.get(timeout=0.01)
+                 with self.lock:
+                     self.decoding_buffer.append(token_id)
+                 self.token_queue.task_done()
+             except queue.Empty:
+                 continue
+             except Exception as e:
+                 print(f"\nError: Token printer error: {str(e)}")
+                 break
+
+     def stop(self):
+         """Stop the printer thread."""
+         if self.thread and self.thread.is_alive():
+             self.stop_event.set()
+             try:
+                 self.thread.join(timeout=1.0)
+             except Exception:
+                 pass
+             print(RESET_COLOR)  # Reset color at the end
+         return self.buffer
+
+     def set_timing(self, prefill_time, inference_time, context_pos):
+         """Set timing information."""
+         self.prefill_time = prefill_time
+         self.inference_time = inference_time
+         self.context_pos = context_pos
+
+ def parse_model_path(path):
+     """Parse model path and return full path with .mlmodelc or .mlpackage extension."""
+     path = Path(path)
+
+     # If path exists exactly as specified, return it
+     if path.exists():
+         return str(path)
+
+     # Try with both extensions
+     candidates = [
+         path,  # Original path
+         path.with_suffix('.mlmodelc'),  # With .mlmodelc
+         path.with_suffix('.mlpackage'),  # With .mlpackage
+         Path(str(path) + '.mlmodelc'),  # Handle case where extension is included
+         Path(str(path) + '.mlpackage')
+     ]
+
+     # Try all possible paths
+     for candidate in candidates:
+         if candidate.exists():
+             print(f"Found model at: {candidate}")
+             return str(candidate)
+
+     # If we get here, no valid path was found
+     print("\nError: Model not found. Tried the following paths:")
+     for candidate in candidates:
+         print(f"  {candidate}")
+     raise FileNotFoundError(f"Model not found: {path}")
+
+ def parse_ffn_filename(path):
+     """Parse FFN model filename to extract chunk information."""
+     path = Path(path)
+     pattern = r'FFN_PF.*_chunk_(\d+)of(\d+)'
+     match = re.search(pattern, path.name)
+
+     if match:
+         current_chunk = int(match.group(1))
+         total_chunks = int(match.group(2))
+         return current_chunk, total_chunks
+     return None, None
+
+ def find_all_chunks(base_path):
+     """Find all chunk files matching the base FFN path pattern."""
+     path = Path(base_path)
+     pattern = re.sub(r'_chunk_\d+of\d+', '_chunk_*', str(path))
+     return sorted(glob.glob(pattern))
+
+ def load_model(path, function_name=None):
+     """Load a CoreML model, handling both .mlmodelc and .mlpackage formats."""
+     path = Path(path)
+     compute_unit = ct.ComputeUnit.CPU_AND_NE
+
+     try:
+         if path.suffix == '.mlmodelc':
+             # For compiled models (.mlmodelc), use CompiledMLModel
+             if function_name:
+                 return ct.models.CompiledMLModel(str(path), compute_unit, function_name=function_name)
+             else:
+                 return ct.models.CompiledMLModel(str(path), compute_unit)
+         else:
+             # For packages (.mlpackage)
+             if function_name:
+                 return ct.models.MLModel(str(path), function_name=function_name)
+             else:
+                 return ct.models.MLModel(str(path))
+
+     except RuntimeError as e:
+         if "valid manifest does not exist" in str(e):
+             print(f"\nError: Could not load compiled model at {path}")
+             print("This might be because:")
+             print("1. The model is not properly compiled")
+             print("2. The model was compiled for a different OS version")
+             print("3. The model needs to be recompiled")
+             print("\nTry using the .mlpackage version instead, or recompile the model.")
+         raise
+
+ def load_metadata(model, args):
+     # Extract metadata and config parameters
+     metadata = {}
+     if hasattr(model, 'user_defined_metadata'):
+         meta = model.user_defined_metadata
+
+         # Extract key parameters with defaults
+         metadata['context_length'] = int(meta.get('com.anemll.context_length', 512))
+         metadata['state_length'] = int(meta.get('com.anemll.state_length', metadata['context_length']))
+         metadata['batch_size'] = int(meta.get('com.anemll.batch_size', 64))
+         metadata['lut_bits'] = int(meta.get('com.anemll.lut_bits', 0))
+         metadata['num_chunks'] = int(meta.get('com.anemll.num_chunks', 1))
+
+         print("\nExtracted Parameters:")
+         print(f"  Context Length: {metadata['context_length']}")
+         print(f"  State Length: {metadata['state_length']}")
+         print(f"  Prefill Batch Size: {metadata['batch_size']}")
+         print(f"  LUT Bits: {metadata['lut_bits']}")
+         print(f"  Number of Chunks: {metadata['num_chunks']}")
+
+         # Print model info
+         print("\nModel Info:")
+         if 'com.anemll.info' in meta:
+             print(f"  {meta['com.anemll.info']}")
+         if 'com.github.apple.coremltools.version' in meta:
+             print(f"  CoreML Tools: {meta['com.github.apple.coremltools.version']}")
+
+         # Print model input/output shapes
+         print("\nModel Shapes:")
+         if hasattr(model, 'input_description'):
+             print("  Inputs:")
+             for name, desc in model.input_description.items():
+                 print(f"    {name}: {desc}")
+         if hasattr(model, 'output_description'):
+             print("  Outputs:")
+             for name, desc in model.output_description.items():
+                 print(f"    {name}: {desc}")
+     else:
+         print("\nWarning: No metadata found in model")
+
+         # Check if model directory name contains context length pattern (ctxXXX)
+         ctx_len = 512
+         if args.context_length is None:
+             ctx_match = re.search(r'ctx(\d+)', str(args.d))
+             if ctx_match:
+                 ctx_len0 = int(ctx_match.group(1))
+                 if 512 <= ctx_len0 <= 8096:
+                     ctx_len = ctx_len0
+                     print(f"\nDetected context length {ctx_len} from directory name")
+             else:
+                 print(f"\nWarning: No context length found in directory name {args.d}; using default {ctx_len}")
+         else:
+             ctx_len = args.context_length
+
+         # Use defaults
+         metadata['context_length'] = ctx_len
+         metadata['state_length'] = ctx_len
+         metadata['batch_size'] = 64
+         metadata['lut_bits'] = 4
+         metadata['num_chunks'] = 4
+         print("\nUsing default parameters:")
+         print(f"  Context Length: {metadata['context_length']}")
+         print(f"  State Length: {metadata['state_length']}")
+         print(f"  Prefill Batch Size: {metadata['batch_size']}")
+         print(f"  LUT Bits: {metadata['lut_bits']}")
+         print(f"  Number of Chunks: {metadata['num_chunks']}")
+     return metadata
+
+ def load_models(args, metadata):
+     """Load all required models and extract metadata."""
+     print("\nLoading models...")
+
+     try:
+         # Load embeddings model
+         print("\nLoading embeddings model...")
+         embed_path = parse_model_path(args.embed)
+         print(f"Loading from: {embed_path}")
+         embed_model = load_model(embed_path)
+         print("Embeddings model loaded successfully")
+         metadata = load_metadata(embed_model, args)
+
+         # Load LM head model
+         print("\nLoading LM head model...")
+         lmhead_path = parse_model_path(args.lmhead)
+         print(f"Loading from: {lmhead_path}")
+         lmhead_model = load_model(lmhead_path)
+         print("LM head model loaded successfully")
+
+         # Parse FFN path and find chunks if needed
+         print("\nLoading FFN+PREFILL model(s)...")
+         ffn_path = parse_model_path(args.ffn)
+         chunk_no, total_chunks = parse_ffn_filename(ffn_path)
+
+         ffn_models = []
+         if chunk_no and total_chunks:
+             print(f"\nDetected chunked FFN+PREFILL model ({total_chunks} chunks)")
+             # Find and load all chunks
+             chunk_paths = find_all_chunks(ffn_path)
+             if len(chunk_paths) != total_chunks:
+                 raise ValueError(f"Found {len(chunk_paths)} chunks but filename indicates {total_chunks} chunks")
+
+             for chunk_path in chunk_paths:
+                 print(f"\nLoading FFN+PREFILL chunk: {Path(chunk_path).name}")
+                 try:
+                     # For chunked models, we need both infer and prefill functions
+                     ffn_models.append({
+                         'infer': load_model(chunk_path, function_name='infer'),
+                         'prefill': load_model(chunk_path, function_name='prefill')
+                     })
+                     print("Chunk loaded successfully")
+                 except Exception as e:
+                     print(f"Error loading chunk {chunk_path}: {str(e)}")
+                     raise
+             metadata = load_metadata(ffn_models[0], args)
+
+         else:
+             print("\nLoading single FFN model...")
+             ffn_models.append(load_model(ffn_path))
+             print("FFN model loaded successfully")
+
+         return embed_model, ffn_models, lmhead_model, metadata
+
+     except Exception as e:
+         print(f"\nError loading models: {str(e)}")
+         print("\nPlease ensure all model files exist and are accessible.")
+         print("Expected files:")
+         print(f"  Embeddings: {args.embed}")
+         print(f"  LM Head: {args.lmhead}")
+         print(f"  FFN: {args.ffn}")
+         raise
+
+ def initialize_tokenizer(model_path=None):
+     """Initialize and configure the tokenizer."""
+     try:
+         tokenizer = AutoTokenizer.from_pretrained(
+             str(model_path),
+             use_fast=False,
+             trust_remote_code=True
+         )
+
+         print("\nTokenizer Configuration:")
+         print(f"Tokenizer type: {type(tokenizer)}")
+         print(f"Tokenizer name: {tokenizer.__class__.__name__}")
+         print(f"Vocabulary size: {len(tokenizer)}")
+         print(f"Model max length: {tokenizer.model_max_length}")
+
+         if tokenizer.pad_token is None:
+             tokenizer.pad_token = tokenizer.eos_token
+             tokenizer.pad_token_id = tokenizer.eos_token_id
+             print("Set PAD token to EOS token")
+
+         tokenizer.padding_side = "left"
+
+         print("\nSpecial Tokens:")
+         print(f"PAD token: '{tokenizer.pad_token}' (ID: {tokenizer.pad_token_id})")
+         print(f"EOS token: '{tokenizer.eos_token}' (ID: {tokenizer.eos_token_id})")
+         print(f"BOS token: '{tokenizer.bos_token}' (ID: {tokenizer.bos_token_id})")
+         print(f"UNK token: '{tokenizer.unk_token}' (ID: {tokenizer.unk_token_id})")
+
+         return tokenizer
+
+     except Exception as e:
+         print(f"\nError: Failed to load tokenizer from {model_path}")
+         print(f"Error details: {str(e)}")
+         print(f"Error type: {type(e)}")
+         print("\nThis code requires a Llama 3.2 model for chat template functionality.")
+         print("Please provide the path to a Llama 3.2 model directory.")
+         import traceback
+         traceback.print_exc()
+         raise
+
+ def make_causal_mask(length, start):
+     """Create causal attention mask."""
+     mask = np.full((1, 1, length, length), -np.inf, dtype=np.float16)
+     row_indices = np.arange(length).reshape(length, 1)
+     col_indices = np.arange(length).reshape(1, length)
+     mask[:, :, col_indices <= (row_indices + start)] = 0
+     return mask
+
+ def run_prefill(embed_model, ffn_models, input_ids, current_pos, context_length, batch_size, state):
+     """Run prefill on the input sequence."""
+     # Process in batches
+     batch_pos = 0
+     while batch_pos < current_pos:
+         batch_end = min(batch_pos + batch_size, current_pos)
+         current_batch_size = batch_end - batch_pos
+
+         # Get current batch
+         batch_input = input_ids[:, batch_pos:batch_end]
+
+         # Pad to full batch size
+         batch_input = F.pad(
+             batch_input,
+             (0, batch_size - current_batch_size),
+             value=0
+         )
+
+         # Generate position IDs for this batch
+         position_ids = torch.arange(batch_pos, batch_pos + batch_size, dtype=torch.int32)
+
+         # Create causal mask for this batch
+         causal_mask = make_causal_mask(context_length, 0)  # Always start from 0 for prefill
+         causal_mask = torch.tensor(causal_mask, dtype=torch.float16)
+         batch_causal_mask = causal_mask[:, :, batch_pos:batch_pos + batch_size, :]
+
+         # Run embeddings
+         hidden_states = torch.from_numpy(
+             embed_model.predict({'input_ids': batch_input.numpy()})['hidden_states']
+         )
+
+         # Run through FFN chunks
+         for ffn_model in ffn_models:
+             if isinstance(ffn_model, dict):
+                 inputs = {
+                     'hidden_states': hidden_states.numpy(),
+                     'position_ids': position_ids.numpy(),
+                     'causal_mask': batch_causal_mask.numpy(),
+                     'current_pos': np.array([batch_pos], dtype=np.int32)
+                 }
+                 output = ffn_model['prefill'].predict(inputs, state)
+                 hidden_states = torch.from_numpy(output['output_hidden_states'])
+
+         batch_pos = batch_end
+
+     return torch.tensor([current_pos], dtype=torch.int32)
+
+ def generate_next_token(embed_model, ffn_models, lmhead_model, input_ids, pos, context_length, state=None, temperature=0.0):
+     """Generate the next token."""
+     # Get current token
+     current_token = input_ids[:, pos-1:pos]
+
+     # Run embeddings
+     hidden_states = torch.from_numpy(
+         embed_model.predict({'input_ids': current_token.numpy()})['hidden_states']
+     )
+
+     # Create masks
+     update_mask = torch.zeros((1, 1, context_length, 1), dtype=torch.float16)
+     update_mask[0, 0, pos-1, 0] = 1.0
+     position_ids = torch.tensor([pos-1], dtype=torch.int32)
+
+     # Create causal mask for current position
+     causal_mask = make_causal_mask(context_length, 0)  # Always start from 0 for generation
+     single_causal_mask = torch.tensor(causal_mask[:, :, pos-1:pos, :], dtype=torch.float16)
+
+     # Run through FFN chunks
+     for ffn_model in ffn_models:
+         if isinstance(ffn_model, dict):
+             inputs = {
+                 'hidden_states': hidden_states.numpy(),
+                 'update_mask': update_mask.numpy(),
+                 'position_ids': position_ids.numpy(),
+                 'causal_mask': single_causal_mask.numpy(),
+                 'current_pos': position_ids.numpy()
+             }
+             output = ffn_model['infer'].predict(inputs, state)
+             hidden_states = torch.from_numpy(output['output_hidden_states'])
+
+     # Run LM head and get next token
+     lm_output = lmhead_model.predict({'hidden_states': hidden_states.numpy()})
+
+     if 'logits1' in lm_output:
+         logits_parts = []
+         for i in range(1, 9):
+             key = f'logits{i}'
+             if key in lm_output:
+                 logits_parts.append(torch.from_numpy(lm_output[key]))
+         logits = torch.cat(logits_parts, dim=-1)
+     else:
+         logits = torch.from_numpy(lm_output['output_logits'])
+
+     if temperature > 0:
+         logits = logits / temperature
+         probs = F.softmax(logits[0, -1, :], dim=-1)
+         next_token = torch.multinomial(probs, num_samples=1).item()
+     else:
+         next_token = torch.argmax(logits[0, -1, :]).item()
+
+     return next_token
+
+ def create_unified_state(ffn_models, context_length):
+     """Create unified KV cache state for transformer."""
+     if isinstance(ffn_models[0], dict):
+         # Use first FFN model's prefill function to create state
+         state = ffn_models[0]['prefill'].make_state()
+         print(f"\nCreated unified transformer state for {len(ffn_models)} chunks")
+         return state
+     else:
+         state = ffn_models[0].make_state()
+         print("\nCreated unified transformer state")
+         return state
+
499
+ def get_user_input():
+     sys.stdout.write(f"\n{LIGHT_GREEN}You:{RESET_COLOR} ")
+     sys.stdout.flush()
+     line = sys.stdin.readline()
+     if not line:
+         raise EOFError
+     return line.rstrip('\n')
+
+ def chat_loop(embed_model, ffn_models, lmhead_model, tokenizer, metadata, state, auto_prompt=None, warmup=False):
+     """Interactive chat loop."""
+     context_length = metadata.get('context_length')
+     batch_size = metadata.get('batch_size', 64)
+
+     if not warmup:
+         print(f"\nUsing context length: {context_length}")
+         print("\nStarting chat session. Press Ctrl+D to exit.")
+         print("Type your message and press Enter to chat.")
+
+     # Keep track of conversation history
+     conversation = []
+
+     try:
+         while True:
+             try:
+                 if not warmup:
+                     print(f"\n{LIGHT_GREEN}You:{RESET_COLOR}", end=' ', flush=True)
+                 if auto_prompt is not None:
+                     user_input = auto_prompt
+                     if not warmup:
+                         print(user_input)
+                 else:
+                     user_input = input().strip()
+             except EOFError:
+                 if not warmup:
+                     print("\nExiting chat...")
+                 break
+
+             if not user_input:
+                 continue
+
+             # Add user message to conversation
+             conversation.append({"role": "user", "content": user_input})
+
+             # Format using chat template with full history
+             base_input_ids = tokenizer.apply_chat_template(
+                 conversation,
+                 return_tensors="pt",
+                 add_generation_prompt=True
+             ).to(torch.int32)
+
+             # Trim history if the templated prompt is too long
+             while base_input_ids.size(1) > context_length - 100:  # Leave room for the response
+                 if len(conversation) > 2:
+                     # Remove the oldest user/assistant message pair
+                     conversation = conversation[2:]
+                     base_input_ids = tokenizer.apply_chat_template(
+                         conversation,
+                         return_tensors="pt",
+                         add_generation_prompt=True
+                     ).to(torch.int32)
+                 else:
+                     # If only the current message remains and it is still too long, truncate it
+                     base_input_ids = base_input_ids[:, -context_length//2:]
+                     break
+
+             context_pos = base_input_ids.size(1)
+
+             # Pad sequence to context_length
+             input_ids = F.pad(
+                 base_input_ids,
+                 (0, context_length - context_pos),
+                 value=0
+             )
+
+             if not warmup:
+                 print(f"\n{LIGHT_BLUE}Assistant:{RESET_COLOR}", end=' ', flush=True)
+
+             # Initialize token printer and collect response
+             token_printer = TokenPrinter(tokenizer)
+             response_tokens = []
+             generation_start_time = time.time()
+
+             try:
+                 # Create initial causal mask
+                 causal_mask = make_causal_mask(context_length, 0)
+                 causal_mask = torch.tensor(causal_mask, dtype=torch.float16)
+
+                 # Run prefill on the entire context
+                 current_pos = run_prefill(
+                     embed_model,
+                     ffn_models,
+                     input_ids,
+                     context_pos,
+                     context_length,
+                     batch_size,
+                     state
+                 )
+
+                 # Generation loop
+                 pos = context_pos
+                 tokens_generated = 0
+                 inference_start = time.time()  # Start inference timing
+
+                 while True:
+                     # Shift the context window when we run out of room
+                     if pos >= context_length - 2:
+                         # Calculate shift to maintain full batches
+                         batch_size = metadata.get('batch_size', 64)
+                         # Calculate max batches that fit in context
+                         max_batches = context_length // batch_size
+                         desired_batches = max(1, max_batches - 2)  # Leave room for new tokens
+                         new_size = min(desired_batches * batch_size, context_length - batch_size)
+
+                         # Create shifted input_ids
+                         tmp = torch.zeros((1, context_length), dtype=torch.int32)
+                         tmp[:, 0:new_size] = input_ids[:, pos-new_size:pos]
+                         input_ids = tmp
+
+                         # Keep the same KV state and re-run prefill over the shifted content
+                         current_pos = run_prefill(
+                             embed_model,
+                             ffn_models,
+                             input_ids,
+                             new_size,  # Prefill the entire shifted content
+                             context_length,
+                             batch_size,
+                             state
+                         )
+
+                         # Start generating from the next position
+                         pos = new_size  # Don't back up; continue from where we left off
+
+                     # Generate next token
+                     next_token = generate_next_token(
+                         embed_model,
+                         ffn_models,
+                         lmhead_model,
+                         input_ids,
+                         pos,
+                         context_length,
+                         state
+                     )
+
+                     # Add token
+                     input_ids[0, pos] = next_token
+                     if not warmup:
+                         token_printer.add_token(next_token)
+                         token_printer.drain_buffer()
+                     response_tokens.append(next_token)
+
+                     pos += 1
+                     tokens_generated += 1
+
+                     # In warmup mode, limit tokens
+                     if warmup and tokens_generated >= WARMUP_TOKEN_LIMIT:
+                         break
+
+                     if next_token == tokenizer.eos_token_id:
+                         break
+
+                 inference_time = time.time() - inference_start  # Calculate inference time
+
+                 # Add assistant response to conversation
+                 response_text = token_printer.stop()
+                 conversation.append({"role": "assistant", "content": response_text})
+
+                 # Print stats only if not in warmup
+                 if not warmup:
+                     total_time = time.time() - generation_start_time
+                     prefill_time = total_time - inference_time
+                     inference_tokens_per_sec = len(response_tokens) / inference_time if inference_time > 0 else 0
+                     prefill_ms = prefill_time * 1000
+                     prefill_tokens_per_sec = context_pos / prefill_time if prefill_time > 0 else 0
+                     print(f"{DARK_BLUE}{inference_tokens_per_sec:.1f} t/s, "
+                           f"TTFT: {prefill_ms:.1f}ms ({prefill_tokens_per_sec:.1f} t/s), "
+                           f"{len(response_tokens)} tokens{RESET_COLOR}")
+
+                 if auto_prompt is not None:
+                     break
+
+             except KeyboardInterrupt:
+                 if not warmup:
+                     print("\nGeneration interrupted")
+                 token_printer.stop()
+                 continue
+
+     except Exception as e:
+         if not warmup:
+             print(f"\nError in chat loop: {str(e)}")
+         import traceback
+         traceback.print_exc()
+
+ def main():
+     parser = argparse.ArgumentParser(description='Full chat with CoreML LLaMA with context window shifting (c) 2025 Anemll')
+
+     # Add meta.yaml option
+     parser.add_argument('--meta', type=str, help='Path to meta.yaml to load all parameters')
+
+     # Add existing arguments
+     parser.add_argument('--d', '--dir', type=str, default='.',
+                         help='Directory containing model files (default: current directory)')
+     parser.add_argument('--embed', type=str, required=False,
+                         help='Path to embeddings model (relative to --dir)')
+     parser.add_argument('--ffn', type=str, required=False,
+                         help='Path to FFN model (can be chunked, relative to --dir)')
+     parser.add_argument('--lmhead', type=str, required=False,
+                         help='Path to LM head model (relative to --dir)')
+     parser.add_argument('--tokenizer', type=str, required=False,
+                         help='Path to tokenizer')
+
+     # Add new argument for auto-generation
+     parser.add_argument('--prompt', type=str,
+                         help='If specified, run once with this prompt and exit')
+
+     # Model configuration
+     parser.add_argument('--context-length', type=int,
+                         help='Context length for the model (default: 512); if not provided, it is detected from the model directory name (ctxNUMBER)')
+
+     args = parser.parse_args()
+
+     # If meta.yaml is provided, load parameters from it
+     if args.meta:
+         try:
+             with open(args.meta, 'r') as f:
+                 meta = yaml.safe_load(f)
+             params = meta['model_info']['parameters']
+
+             # Set model directory to meta.yaml directory if not specified
+             if not args.d or args.d == '.':
+                 args.d = str(Path(args.meta).parent)
+
+             # Build model paths based on parameters
+             prefix = params.get('model_prefix', 'llama')  # Default to 'llama' if not specified
+             lut_ffn = f"_lut{params['lut_ffn']}" if params['lut_ffn'] != 'none' else ''
+             lut_lmhead = f"_lut{params['lut_lmhead']}" if params['lut_lmhead'] != 'none' else ''
+             num_chunks = int(params['num_chunks'])
+
+             # Set model paths if not specified
+             if not args.embed:
+                 args.embed = f'{prefix}_embeddings'
+             if not args.lmhead:
+                 args.lmhead = f'{prefix}_lm_head{lut_lmhead}'
+             if not args.ffn:
+                 args.ffn = f'{prefix}_FFN_PF{lut_ffn}_chunk_01of{num_chunks:02d}'
+             if not args.tokenizer:
+                 args.tokenizer = args.d
+
+             # Set other parameters
+             args.context_length = int(params['context_length'])
+             args.batch_size = int(params['batch_size'])
+             args.num_chunks = num_chunks
+
+             print(f"\nLoaded parameters from {args.meta}:")
+             print(f"  Context Length: {args.context_length}")
+             print(f"  Batch Size: {args.batch_size}")
+             print(f"  Num Chunks: {args.num_chunks}")
+             print(f"  Models Directory: {args.d}")
+             print(f"  Embeddings: {args.embed}")
+             print(f"  LM Head: {args.lmhead}")
+             print(f"  FFN: {args.ffn}")
+
+         except Exception as e:
+             print(f"\nError loading meta.yaml: {str(e)}")
+             sys.exit(1)
+
+     # Convert directory to absolute path
+     model_dir = Path(args.d).resolve()
+     if not model_dir.exists():
+         print(f"\nError: Model directory not found: {model_dir}")
+         return 1
+
+     print(f"\nUsing model directory: {model_dir}")
+     print(f"Context length: {args.context_length}")
+
+     try:
+         # Update paths to be relative to model directory
+         args.embed = str(model_dir / args.embed)
+         args.ffn = str(model_dir / args.ffn)
+         args.lmhead = str(model_dir / args.lmhead)
+
+         # Handle tokenizer path separately since it's not relative to model_dir
+         if args.tokenizer is None:
+             args.tokenizer = str(model_dir)
+
+         if not Path(args.tokenizer).exists():
+             print(f"\nError: Tokenizer directory not found: {args.tokenizer}")
+             return 1
+
+         args.tokenizer = str(Path(args.tokenizer).resolve())  # Convert to absolute path
+         print(f"Using tokenizer path: {args.tokenizer}")
+
+         metadata = {}
+         # Load models and extract metadata
+         embed_model, ffn_models, lmhead_model, metadata = load_models(args, metadata)
+
+         print(f"\nMetadata before applying args.context_length: {metadata}")
+
+         # Override context length from command line if provided
+         if args.context_length is not None:
+             metadata['context_length'] = args.context_length
+             metadata['state_length'] = args.context_length  # Also update state_length
+             print(f"\nOverriding context length from command line: {args.context_length}")
+
+         print(f"\nMetadata after load_models: {metadata}")
+
+         # Load tokenizer with resolved path
+         tokenizer = initialize_tokenizer(args.tokenizer)
+         if tokenizer is None:
+             raise RuntimeError("Failed to initialize tokenizer")
+
+         # Create unified state once
+         state = create_unified_state(ffn_models, metadata['context_length'])
+
+         # Warmup runs to avoid Python GIL stalls with CoreML on the first real request
+         for i in range(2):
+             chat_loop(
+                 embed_model=embed_model,
+                 ffn_models=ffn_models,
+                 lmhead_model=lmhead_model,
+                 tokenizer=tokenizer,
+                 metadata=metadata,
+                 state=state,  # Pass the state
+                 warmup=True,
+                 auto_prompt="who are you?"
+             )
+
+         # Main run
+         chat_loop(
+             embed_model=embed_model,
+             ffn_models=ffn_models,
+             lmhead_model=lmhead_model,
+             tokenizer=tokenizer,
+             metadata=metadata,
+             state=state,  # Pass the state
+             warmup=False,
+             auto_prompt=args.prompt
+         )
+
+     except Exception as e:
+         print(f"\nError: {str(e)}")
+         import traceback
+         traceback.print_exc()
+         return 1
+
+     return 0
+
+ if __name__ == "__main__":
+     exit(main())
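The script above loads the chunked CoreML models, prefills the KV cache in fixed-size batches, and then generates token by token with context-window shifting. Its file name is not visible in this view; assuming it is saved as `chat_full.py` next to the `meta.yaml` added below (a hypothetical name for illustration), a typical invocation would look like:

```
python chat_full.py --meta ./meta.yaml                          # interactive chat
python chat_full.py --meta ./meta.yaml --prompt "who are you?"  # run once and exit
```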
meta.yaml ADDED
@@ -0,0 +1,20 @@
+ model_info:
+   name: anemll-DeepHermes-3-Llama-3-8B-Preview-ctx1024
+   version: 0.1.1
+   description: |
+     Demonstrates running DeepHermes-3-Llama-3-8B-Preview on Apple Neural Engine
+     Context length: 1024
+     Batch size: 64
+     Chunks: 8
+   license: MIT
+   author: Anemll
+   framework: Core ML
+   language: Python
+   parameters:
+     context_length: 1024
+     batch_size: 64
+     lut_embeddings: none
+     lut_ffn: 6
+     lut_lmhead: 6
+     num_chunks: 8
+     model_prefix: DeepHermes
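For reference, here is a minimal sketch (not part of the repository) of how `main()` above expands these `meta.yaml` parameters into the model file names shipped in this repo; the dictionary literal simply mirrors the `parameters` block:

```python
# Minimal sketch: derive model file names from the meta.yaml parameters above.
params = {"model_prefix": "DeepHermes", "lut_ffn": 6, "lut_lmhead": 6, "num_chunks": 8}

prefix = params["model_prefix"]
lut_ffn = f"_lut{params['lut_ffn']}" if params["lut_ffn"] != "none" else ""
lut_lmhead = f"_lut{params['lut_lmhead']}" if params["lut_lmhead"] != "none" else ""

print(f"{prefix}_embeddings")           # -> DeepHermes_embeddings
print(f"{prefix}_lm_head{lut_lmhead}")  # -> DeepHermes_lm_head_lut6
print(f"{prefix}_FFN_PF{lut_ffn}_chunk_01of{params['num_chunks']:02d}")
# -> DeepHermes_FFN_PF_lut6_chunk_01of08
```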
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b9e4e7fb171f92fd137b777cc2714bf87d11576700a1dcd7a399e7bbe39537b
+ size 17209920
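Note that `tokenizer.json` is stored as a Git LFS pointer: only the `oid`/`size` stanza above lives in the Git history, while the ~17 MB payload sits in LFS storage. If a clone comes down without the payload, the standard LFS commands (not specific to this repo) fetch it:

```
git lfs install
git lfs pull
```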
tokenizer_config.json ADDED
@@ -0,0 +1,2077 @@
1
+ {
2
+ "add_bos_token": true,
3
+ "added_tokens_decoder": {
4
+ "128000": {
5
+ "content": "<|begin_of_text|>",
6
+ "lstrip": false,
7
+ "normalized": false,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ },
12
+ "128001": {
13
+ "content": "<|end_of_text|>",
14
+ "lstrip": false,
15
+ "normalized": false,
16
+ "rstrip": false,
17
+ "single_word": false,
18
+ "special": true
19
+ },
20
+ "128002": {
21
+ "content": "<|reserved_special_token_0|>",
22
+ "lstrip": false,
23
+ "normalized": false,
24
+ "rstrip": false,
25
+ "single_word": false,
26
+ "special": true
27
+ },
28
+ "128003": {
29
+ "content": "<|reserved_special_token_1|>",
30
+ "lstrip": false,
31
+ "normalized": false,
32
+ "rstrip": false,
33
+ "single_word": false,
34
+ "special": true
35
+ },
36
+ "128004": {
37
+ "content": "<|finetune_right_pad_id|>",
38
+ "lstrip": false,
39
+ "normalized": false,
40
+ "rstrip": false,
41
+ "single_word": false,
42
+ "special": true
43
+ },
44
+ "128005": {
45
+ "content": "<|reserved_special_token_2|>",
46
+ "lstrip": false,
47
+ "normalized": false,
48
+ "rstrip": false,
49
+ "single_word": false,
50
+ "special": true
51
+ },
52
+ "128006": {
53
+ "content": "<|start_header_id|>",
54
+ "lstrip": false,
55
+ "normalized": false,
56
+ "rstrip": false,
57
+ "single_word": false,
58
+ "special": true
59
+ },
60
+ "128007": {
61
+ "content": "<|end_header_id|>",
62
+ "lstrip": false,
63
+ "normalized": false,
64
+ "rstrip": false,
65
+ "single_word": false,
66
+ "special": true
67
+ },
68
+ "128008": {
69
+ "content": "<|eom_id|>",
70
+ "lstrip": false,
71
+ "normalized": false,
72
+ "rstrip": false,
73
+ "single_word": false,
74
+ "special": true
75
+ },
76
+ "128009": {
77
+ "content": "<|eot_id|>",
78
+ "lstrip": false,
79
+ "normalized": false,
80
+ "rstrip": false,
81
+ "single_word": false,
82
+ "special": true
83
+ },
84
+ "128010": {
85
+ "content": "<|python_tag|>",
86
+ "lstrip": false,
87
+ "normalized": false,
88
+ "rstrip": false,
89
+ "single_word": false,
90
+ "special": true
91
+ },
92
+ "128011": {
93
+ "content": "<|reserved_special_token_3|>",
94
+ "lstrip": false,
95
+ "normalized": false,
96
+ "rstrip": false,
97
+ "single_word": false,
98
+ "special": true
99
+ },
100
+ "128012": {
101
+ "content": "<|reserved_special_token_4|>",
102
+ "lstrip": false,
103
+ "normalized": false,
104
+ "rstrip": false,
105
+ "single_word": false,
106
+ "special": true
107
+ },
108
+ "128013": {
109
+ "content": "<|reserved_special_token_5|>",
110
+ "lstrip": false,
111
+ "normalized": false,
112
+ "rstrip": false,
113
+ "single_word": false,
114
+ "special": true
115
+ },
116
+ "128014": {
117
+ "content": "<|reserved_special_token_6|>",
118
+ "lstrip": false,
119
+ "normalized": false,
120
+ "rstrip": false,
121
+ "single_word": false,
122
+ "special": true
123
+ },
124
+ "128015": {
125
+ "content": "<|reserved_special_token_7|>",
126
+ "lstrip": false,
127
+ "normalized": false,
128
+ "rstrip": false,
129
+ "single_word": false,
130
+ "special": true
131
+ },
132
+ "128016": {
133
+ "content": "<|reserved_special_token_8|>",
134
+ "lstrip": false,
135
+ "normalized": false,
136
+ "rstrip": false,
137
+ "single_word": false,
138
+ "special": true
139
+ },
140
+ "128017": {
141
+ "content": "<|reserved_special_token_9|>",
142
+ "lstrip": false,
143
+ "normalized": false,
144
+ "rstrip": false,
145
+ "single_word": false,
146
+ "special": true
147
+ },
148
+ "128018": {
149
+ "content": "<|reserved_special_token_10|>",
150
+ "lstrip": false,
151
+ "normalized": false,
152
+ "rstrip": false,
153
+ "single_word": false,
154
+ "special": true
155
+ },
156
+ "128019": {
157
+ "content": "<|reserved_special_token_11|>",
158
+ "lstrip": false,
159
+ "normalized": false,
160
+ "rstrip": false,
161
+ "single_word": false,
162
+ "special": true
163
+ },
164
+ "128020": {
165
+ "content": "<|reserved_special_token_12|>",
166
+ "lstrip": false,
167
+ "normalized": false,
168
+ "rstrip": false,
169
+ "single_word": false,
170
+ "special": true
171
+ },
172
+ "128021": {
173
+ "content": "<|reserved_special_token_13|>",
174
+ "lstrip": false,
175
+ "normalized": false,
176
+ "rstrip": false,
177
+ "single_word": false,
178
+ "special": true
179
+ },
180
+ "128022": {
181
+ "content": "<|reserved_special_token_14|>",
182
+ "lstrip": false,
183
+ "normalized": false,
184
+ "rstrip": false,
185
+ "single_word": false,
186
+ "special": true
187
+ },
188
+ "128023": {
189
+ "content": "<|reserved_special_token_15|>",
190
+ "lstrip": false,
191
+ "normalized": false,
192
+ "rstrip": false,
193
+ "single_word": false,
194
+ "special": true
195
+ },
196
+ "128024": {
197
+ "content": "<|reserved_special_token_16|>",
198
+ "lstrip": false,
199
+ "normalized": false,
200
+ "rstrip": false,
201
+ "single_word": false,
202
+ "special": true
203
+ },
204
+ "128025": {
205
+ "content": "<|reserved_special_token_17|>",
206
+ "lstrip": false,
207
+ "normalized": false,
208
+ "rstrip": false,
209
+ "single_word": false,
210
+ "special": true
211
+ },
212
+ "128026": {
213
+ "content": "<|reserved_special_token_18|>",
214
+ "lstrip": false,
215
+ "normalized": false,
216
+ "rstrip": false,
217
+ "single_word": false,
218
+ "special": true
219
+ },
220
+ "128027": {
221
+ "content": "<|reserved_special_token_19|>",
222
+ "lstrip": false,
223
+ "normalized": false,
224
+ "rstrip": false,
225
+ "single_word": false,
226
+ "special": true
227
+ },
228
+ "128028": {
229
+ "content": "<|reserved_special_token_20|>",
230
+ "lstrip": false,
231
+ "normalized": false,
232
+ "rstrip": false,
233
+ "single_word": false,
234
+ "special": true
235
+ },
236
+ "128029": {
237
+ "content": "<|reserved_special_token_21|>",
238
+ "lstrip": false,
239
+ "normalized": false,
240
+ "rstrip": false,
241
+ "single_word": false,
242
+ "special": true
243
+ },
244
+ "128030": {
245
+ "content": "<|reserved_special_token_22|>",
246
+ "lstrip": false,
247
+ "normalized": false,
248
+ "rstrip": false,
249
+ "single_word": false,
250
+ "special": true
251
+ },
252
+ "128031": {
253
+ "content": "<|reserved_special_token_23|>",
254
+ "lstrip": false,
255
+ "normalized": false,
256
+ "rstrip": false,
257
+ "single_word": false,
258
+ "special": true
259
+ },
260
+ "128032": {
261
+ "content": "<|reserved_special_token_24|>",
262
+ "lstrip": false,
263
+ "normalized": false,
264
+ "rstrip": false,
265
+ "single_word": false,
266
+ "special": true
267
+ },
268
+ "128033": {
269
+ "content": "<|reserved_special_token_25|>",
270
+ "lstrip": false,
271
+ "normalized": false,
272
+ "rstrip": false,
273
+ "single_word": false,
274
+ "special": true
275
+ },
276
+ "128034": {
277
+ "content": "<|reserved_special_token_26|>",
278
+ "lstrip": false,
279
+ "normalized": false,
280
+ "rstrip": false,
281
+ "single_word": false,
282
+ "special": true
283
+ },
284
+ "128035": {
285
+ "content": "<|reserved_special_token_27|>",
286
+ "lstrip": false,
287
+ "normalized": false,
288
+ "rstrip": false,
289
+ "single_word": false,
290
+ "special": true
291
+ },
292
+ "128036": {
293
+ "content": "<|reserved_special_token_28|>",
294
+ "lstrip": false,
295
+ "normalized": false,
296
+ "rstrip": false,
297
+ "single_word": false,
298
+ "special": true
299
+ },
300
+ "128037": {
301
+ "content": "<|reserved_special_token_29|>",
302
+ "lstrip": false,
303
+ "normalized": false,
304
+ "rstrip": false,
305
+ "single_word": false,
306
+ "special": true
307
+ },
308
+ "128038": {
309
+ "content": "<|reserved_special_token_30|>",
310
+ "lstrip": false,
311
+ "normalized": false,
312
+ "rstrip": false,
313
+ "single_word": false,
314
+ "special": true
315
+ },
316
+ "128039": {
317
+ "content": "<|reserved_special_token_31|>",
318
+ "lstrip": false,
319
+ "normalized": false,
320
+ "rstrip": false,
321
+ "single_word": false,
322
+ "special": true
323
+ },
324
+ "128040": {
325
+ "content": "<|reserved_special_token_32|>",
326
+ "lstrip": false,
327
+ "normalized": false,
328
+ "rstrip": false,
329
+ "single_word": false,
330
+ "special": true
331
+ },
332
+ "128041": {
333
+ "content": "<|reserved_special_token_33|>",
334
+ "lstrip": false,
335
+ "normalized": false,
336
+ "rstrip": false,
337
+ "single_word": false,
338
+ "special": true
339
+ },
340
+ "128042": {
341
+ "content": "<|reserved_special_token_34|>",
342
+ "lstrip": false,
343
+ "normalized": false,
344
+ "rstrip": false,
345
+ "single_word": false,
346
+ "special": true
347
+ },
348
+ "128043": {
349
+ "content": "<|reserved_special_token_35|>",
350
+ "lstrip": false,
351
+ "normalized": false,
352
+ "rstrip": false,
353
+ "single_word": false,
354
+ "special": true
355
+ },
356
+ "128044": {
357
+ "content": "<|reserved_special_token_36|>",
358
+ "lstrip": false,
359
+ "normalized": false,
360
+ "rstrip": false,
361
+ "single_word": false,
362
+ "special": true
363
+ },
364
+ "128045": {
365
+ "content": "<|reserved_special_token_37|>",
366
+ "lstrip": false,
367
+ "normalized": false,
368
+ "rstrip": false,
369
+ "single_word": false,
370
+ "special": true
371
+ },
372
+ "128046": {
373
+ "content": "<|reserved_special_token_38|>",
374
+ "lstrip": false,
375
+ "normalized": false,
376
+ "rstrip": false,
377
+ "single_word": false,
378
+ "special": true
379
+ },
380
+ "128047": {
381
+ "content": "<|reserved_special_token_39|>",
382
+ "lstrip": false,
383
+ "normalized": false,
384
+ "rstrip": false,
385
+ "single_word": false,
386
+ "special": true
387
+ },
388
+ "128048": {
389
+ "content": "<|reserved_special_token_40|>",
390
+ "lstrip": false,
391
+ "normalized": false,
392
+ "rstrip": false,
393
+ "single_word": false,
394
+ "special": true
395
+ },
396
+ "128049": {
397
+ "content": "<|reserved_special_token_41|>",
398
+ "lstrip": false,
399
+ "normalized": false,
400
+ "rstrip": false,
401
+ "single_word": false,
402
+ "special": true
403
+ },
404
+ "128050": {
405
+ "content": "<|reserved_special_token_42|>",
406
+ "lstrip": false,
407
+ "normalized": false,
408
+ "rstrip": false,
409
+ "single_word": false,
410
+ "special": true
411
+ },
412
+ "128051": {
413
+ "content": "<|reserved_special_token_43|>",
414
+ "lstrip": false,
415
+ "normalized": false,
416
+ "rstrip": false,
417
+ "single_word": false,
418
+ "special": true
419
+ },
420
+ "128052": {
421
+ "content": "<|reserved_special_token_44|>",
422
+ "lstrip": false,
423
+ "normalized": false,
424
+ "rstrip": false,
425
+ "single_word": false,
426
+ "special": true
427
+ },
428
+ "128053": {
429
+ "content": "<|reserved_special_token_45|>",
430
+ "lstrip": false,
431
+ "normalized": false,
432
+ "rstrip": false,
433
+ "single_word": false,
434
+ "special": true
435
+ },
436
+ "128054": {
437
+ "content": "<|reserved_special_token_46|>",
438
+ "lstrip": false,
439
+ "normalized": false,
440
+ "rstrip": false,
441
+ "single_word": false,
442
+ "special": true
443
+ },
444
+ "128055": {
445
+ "content": "<|reserved_special_token_47|>",
446
+ "lstrip": false,
447
+ "normalized": false,
448
+ "rstrip": false,
449
+ "single_word": false,
450
+ "special": true
451
+ },
452
+ "128056": {
453
+ "content": "<|reserved_special_token_48|>",
454
+ "lstrip": false,
455
+ "normalized": false,
456
+ "rstrip": false,
457
+ "single_word": false,
458
+ "special": true
459
+ },
460
+ "128057": {
461
+ "content": "<|reserved_special_token_49|>",
462
+ "lstrip": false,
463
+ "normalized": false,
464
+ "rstrip": false,
465
+ "single_word": false,
466
+ "special": true
467
+ },
468
+ "128058": {
469
+ "content": "<|reserved_special_token_50|>",
470
+ "lstrip": false,
471
+ "normalized": false,
472
+ "rstrip": false,
473
+ "single_word": false,
474
+ "special": true
475
+ },
476
+ "128059": {
477
+ "content": "<|reserved_special_token_51|>",
478
+ "lstrip": false,
479
+ "normalized": false,
480
+ "rstrip": false,
481
+ "single_word": false,
482
+ "special": true
483
+ },
484
+ "128060": {
485
+ "content": "<|reserved_special_token_52|>",
486
+ "lstrip": false,
487
+ "normalized": false,
488
+ "rstrip": false,
489
+ "single_word": false,
490
+ "special": true
491
+ },
492
+ "128061": {
493
+ "content": "<|reserved_special_token_53|>",
494
+ "lstrip": false,
495
+ "normalized": false,
496
+ "rstrip": false,
497
+ "single_word": false,
498
+ "special": true
499
+ },
500
+ "128062": {
501
+ "content": "<|reserved_special_token_54|>",
502
+ "lstrip": false,
503
+ "normalized": false,
504
+ "rstrip": false,
505
+ "single_word": false,
506
+ "special": true
507
+ },
508
+ "128063": {
509
+ "content": "<|reserved_special_token_55|>",
510
+ "lstrip": false,
511
+ "normalized": false,
512
+ "rstrip": false,
513
+ "single_word": false,
514
+ "special": true
515
+ },
516
+ "128064": {
517
+ "content": "<|reserved_special_token_56|>",
518
+ "lstrip": false,
519
+ "normalized": false,
520
+ "rstrip": false,
521
+ "single_word": false,
522
+ "special": true
523
+ },
524
+ "128065": {
525
+ "content": "<|reserved_special_token_57|>",
526
+ "lstrip": false,
527
+ "normalized": false,
528
+ "rstrip": false,
529
+ "single_word": false,
530
+ "special": true
531
+ },
532
+ "128066": {
533
+ "content": "<|reserved_special_token_58|>",
534
+ "lstrip": false,
535
+ "normalized": false,
536
+ "rstrip": false,
537
+ "single_word": false,
538
+ "special": true
539
+ },
540
+ "128067": {
541
+ "content": "<|reserved_special_token_59|>",
542
+ "lstrip": false,
543
+ "normalized": false,
544
+ "rstrip": false,
545
+ "single_word": false,
546
+ "special": true
547
+ },
548
+ "128068": {
549
+ "content": "<|reserved_special_token_60|>",
550
+ "lstrip": false,
551
+ "normalized": false,
552
+ "rstrip": false,
553
+ "single_word": false,
554
+ "special": true
555
+ },
556
+ "128069": {
557
+ "content": "<|reserved_special_token_61|>",
558
+ "lstrip": false,
559
+ "normalized": false,
560
+ "rstrip": false,
561
+ "single_word": false,
562
+ "special": true
563
+ },
564
+ "128070": {
565
+ "content": "<|reserved_special_token_62|>",
566
+ "lstrip": false,
567
+ "normalized": false,
568
+ "rstrip": false,
569
+ "single_word": false,
570
+ "special": true
571
+ },
572
+ "128071": {
573
+ "content": "<|reserved_special_token_63|>",
574
+ "lstrip": false,
575
+ "normalized": false,
576
+ "rstrip": false,
577
+ "single_word": false,
578
+ "special": true
579
+ },
580
+ "128072": {
581
+ "content": "<|reserved_special_token_64|>",
582
+ "lstrip": false,
583
+ "normalized": false,
584
+ "rstrip": false,
585
+ "single_word": false,
586
+ "special": true
587
+ },
588
+ "128073": {
589
+ "content": "<|reserved_special_token_65|>",
590
+ "lstrip": false,
591
+ "normalized": false,
592
+ "rstrip": false,
593
+ "single_word": false,
594
+ "special": true
595
+ },
596
+ "128074": {
597
+ "content": "<|reserved_special_token_66|>",
598
+ "lstrip": false,
599
+ "normalized": false,
600
+ "rstrip": false,
601
+ "single_word": false,
602
+ "special": true
603
+ },
604
+ "128075": {
605
+ "content": "<|reserved_special_token_67|>",
606
+ "lstrip": false,
607
+ "normalized": false,
608
+ "rstrip": false,
609
+ "single_word": false,
610
+ "special": true
611
+ },
612
+ "128076": {
613
+ "content": "<|reserved_special_token_68|>",
614
+ "lstrip": false,
615
+ "normalized": false,
616
+ "rstrip": false,
617
+ "single_word": false,
618
+ "special": true
619
+ },
620
+ "128077": {
621
+ "content": "<|reserved_special_token_69|>",
622
+ "lstrip": false,
623
+ "normalized": false,
624
+ "rstrip": false,
625
+ "single_word": false,
626
+ "special": true
627
+ },
628
+ "128078": {
629
+ "content": "<|reserved_special_token_70|>",
630
+ "lstrip": false,
631
+ "normalized": false,
632
+ "rstrip": false,
633
+ "single_word": false,
634
+ "special": true
635
+ },
636
+ "128079": {
637
+ "content": "<|reserved_special_token_71|>",
638
+ "lstrip": false,
639
+ "normalized": false,
640
+ "rstrip": false,
641
+ "single_word": false,
642
+ "special": true
643
+ },
644
+ "128080": {
645
+ "content": "<|reserved_special_token_72|>",
646
+ "lstrip": false,
647
+ "normalized": false,
648
+ "rstrip": false,
649
+ "single_word": false,
650
+ "special": true
651
+ },
652
+ "128081": {
653
+ "content": "<|reserved_special_token_73|>",
654
+ "lstrip": false,
655
+ "normalized": false,
656
+ "rstrip": false,
657
+ "single_word": false,
658
+ "special": true
659
+ },
660
+ "128082": {
661
+ "content": "<|reserved_special_token_74|>",
662
+ "lstrip": false,
663
+ "normalized": false,
664
+ "rstrip": false,
665
+ "single_word": false,
666
+ "special": true
667
+ },
668
+ "128083": {
669
+ "content": "<|reserved_special_token_75|>",
670
+ "lstrip": false,
671
+ "normalized": false,
672
+ "rstrip": false,
673
+ "single_word": false,
674
+ "special": true
675
+ },
676
+ "128084": {
677
+ "content": "<|reserved_special_token_76|>",
678
+ "lstrip": false,
679
+ "normalized": false,
680
+ "rstrip": false,
681
+ "single_word": false,
682
+ "special": true
683
+ },
684
+ "128085": {
685
+ "content": "<|reserved_special_token_77|>",
686
+ "lstrip": false,
687
+ "normalized": false,
688
+ "rstrip": false,
689
+ "single_word": false,
690
+ "special": true
691
+ },
692
+ "128086": {
693
+ "content": "<|reserved_special_token_78|>",
694
+ "lstrip": false,
695
+ "normalized": false,
696
+ "rstrip": false,
697
+ "single_word": false,
698
+ "special": true
699
+ },
700
+ "128087": {
701
+ "content": "<|reserved_special_token_79|>",
702
+ "lstrip": false,
703
+ "normalized": false,
704
+ "rstrip": false,
705
+ "single_word": false,
706
+ "special": true
707
+ },
708
+ "128088": {
709
+ "content": "<|reserved_special_token_80|>",
710
+ "lstrip": false,
711
+ "normalized": false,
712
+ "rstrip": false,
713
+ "single_word": false,
714
+ "special": true
715
+ },
716
+ "128089": {
717
+ "content": "<|reserved_special_token_81|>",
718
+ "lstrip": false,
719
+ "normalized": false,
720
+ "rstrip": false,
721
+ "single_word": false,
722
+ "special": true
723
+ },
724
+ "128090": {
725
+ "content": "<|reserved_special_token_82|>",
726
+ "lstrip": false,
727
+ "normalized": false,
728
+ "rstrip": false,
729
+ "single_word": false,
730
+ "special": true
731
+ },
732
+ "128091": {
733
+ "content": "<|reserved_special_token_83|>",
734
+ "lstrip": false,
735
+ "normalized": false,
736
+ "rstrip": false,
737
+ "single_word": false,
738
+ "special": true
739
+ },
740
+ "128092": {
741
+ "content": "<|reserved_special_token_84|>",
742
+ "lstrip": false,
743
+ "normalized": false,
744
+ "rstrip": false,
745
+ "single_word": false,
746
+ "special": true
747
+ },
748
+ "128093": {
749
+ "content": "<|reserved_special_token_85|>",
750
+ "lstrip": false,
751
+ "normalized": false,
752
+ "rstrip": false,
753
+ "single_word": false,
754
+ "special": true
755
+ },
756
+ "128094": {
757
+ "content": "<|reserved_special_token_86|>",
758
+ "lstrip": false,
759
+ "normalized": false,
760
+ "rstrip": false,
761
+ "single_word": false,
762
+ "special": true
763
+ },
764
+ "128095": {
765
+ "content": "<|reserved_special_token_87|>",
766
+ "lstrip": false,
767
+ "normalized": false,
768
+ "rstrip": false,
769
+ "single_word": false,
770
+ "special": true
771
+ },
772
+ "128096": {
773
+ "content": "<|reserved_special_token_88|>",
774
+ "lstrip": false,
775
+ "normalized": false,
776
+ "rstrip": false,
777
+ "single_word": false,
778
+ "special": true
779
+ },
780
+ "128097": {
781
+ "content": "<|reserved_special_token_89|>",
782
+ "lstrip": false,
783
+ "normalized": false,
784
+ "rstrip": false,
785
+ "single_word": false,
786
+ "special": true
787
+ },
788
+ "128098": {
789
+ "content": "<|reserved_special_token_90|>",
790
+ "lstrip": false,
791
+ "normalized": false,
792
+ "rstrip": false,
793
+ "single_word": false,
794
+ "special": true
795
+ },
796
+ "128099": {
797
+ "content": "<|reserved_special_token_91|>",
798
+ "lstrip": false,
799
+ "normalized": false,
800
+ "rstrip": false,
801
+ "single_word": false,
802
+ "special": true
803
+ },
804
+ "128100": {
805
+ "content": "<|reserved_special_token_92|>",
806
+ "lstrip": false,
807
+ "normalized": false,
808
+ "rstrip": false,
809
+ "single_word": false,
810
+ "special": true
811
+ },
812
+ "128101": {
813
+ "content": "<|reserved_special_token_93|>",
814
+ "lstrip": false,
815
+ "normalized": false,
816
+ "rstrip": false,
817
+ "single_word": false,
818
+ "special": true
819
+ },
820
+ "128102": {
821
+ "content": "<|reserved_special_token_94|>",
822
+ "lstrip": false,
823
+ "normalized": false,
824
+ "rstrip": false,
825
+ "single_word": false,
826
+ "special": true
827
+ },
828
+ "128103": {
829
+ "content": "<|reserved_special_token_95|>",
830
+ "lstrip": false,
831
+ "normalized": false,
832
+ "rstrip": false,
833
+ "single_word": false,
834
+ "special": true
835
+ },
836
+ "128104": {
837
+ "content": "<|reserved_special_token_96|>",
838
+ "lstrip": false,
839
+ "normalized": false,
840
+ "rstrip": false,
841
+ "single_word": false,
842
+ "special": true
843
+ },
844
+ "128105": {
845
+ "content": "<|reserved_special_token_97|>",
846
+ "lstrip": false,
847
+ "normalized": false,
848
+ "rstrip": false,
849
+ "single_word": false,
850
+ "special": true
851
+ },
852
+ "128106": {
853
+ "content": "<|reserved_special_token_98|>",
854
+ "lstrip": false,
855
+ "normalized": false,
856
+ "rstrip": false,
857
+ "single_word": false,
858
+ "special": true
859
+ },
860
+ "128107": {
861
+ "content": "<|reserved_special_token_99|>",
862
+ "lstrip": false,
863
+ "normalized": false,
864
+ "rstrip": false,
865
+ "single_word": false,
866
+ "special": true
867
+ },
868
+ "128108": {
869
+ "content": "<|reserved_special_token_100|>",
870
+ "lstrip": false,
871
+ "normalized": false,
872
+ "rstrip": false,
873
+ "single_word": false,
874
+ "special": true
875
+ },
876
+ "128109": {
877
+ "content": "<|reserved_special_token_101|>",
878
+ "lstrip": false,
879
+ "normalized": false,
880
+ "rstrip": false,
881
+ "single_word": false,
882
+ "special": true
883
+ },
884
+ "128110": {
885
+ "content": "<|reserved_special_token_102|>",
886
+ "lstrip": false,
887
+ "normalized": false,
888
+ "rstrip": false,
889
+ "single_word": false,
890
+ "special": true
891
+ },
892
+ "128111": {
893
+ "content": "<|reserved_special_token_103|>",
894
+ "lstrip": false,
895
+ "normalized": false,
896
+ "rstrip": false,
897
+ "single_word": false,
898
+ "special": true
899
+ },
900
+ "128112": {
901
+ "content": "<|reserved_special_token_104|>",
902
+ "lstrip": false,
903
+ "normalized": false,
904
+ "rstrip": false,
905
+ "single_word": false,
906
+ "special": true
907
+ },
908
+ "128113": {
909
+ "content": "<|reserved_special_token_105|>",
910
+ "lstrip": false,
911
+ "normalized": false,
912
+ "rstrip": false,
913
+ "single_word": false,
914
+ "special": true
915
+ },
916
+ "128114": {
917
+ "content": "<|reserved_special_token_106|>",
918
+ "lstrip": false,
919
+ "normalized": false,
920
+ "rstrip": false,
921
+ "single_word": false,
922
+ "special": true
923
+ },
924
+ "128115": {
925
+ "content": "<|reserved_special_token_107|>",
926
+ "lstrip": false,
927
+ "normalized": false,
928
+ "rstrip": false,
929
+ "single_word": false,
930
+ "special": true
931
+ },
932
+ "128116": {
933
+ "content": "<|reserved_special_token_108|>",
934
+ "lstrip": false,
935
+ "normalized": false,
936
+ "rstrip": false,
937
+ "single_word": false,
938
+ "special": true
939
+ },
940
+ "128117": {
941
+ "content": "<|reserved_special_token_109|>",
942
+ "lstrip": false,
943
+ "normalized": false,
944
+ "rstrip": false,
945
+ "single_word": false,
946
+ "special": true
947
+ },
948
+ "128118": {
949
+ "content": "<|reserved_special_token_110|>",
950
+ "lstrip": false,
951
+ "normalized": false,
952
+ "rstrip": false,
953
+ "single_word": false,
954
+ "special": true
955
+ },
956
+ "128119": {
957
+ "content": "<|reserved_special_token_111|>",
958
+ "lstrip": false,
959
+ "normalized": false,
960
+ "rstrip": false,
961
+ "single_word": false,
962
+ "special": true
963
+ },
964
+ "128120": {
965
+ "content": "<|reserved_special_token_112|>",
966
+ "lstrip": false,
967
+ "normalized": false,
968
+ "rstrip": false,
969
+ "single_word": false,
970
+ "special": true
971
+ },
972
+ "128121": {
973
+ "content": "<|reserved_special_token_113|>",
974
+ "lstrip": false,
975
+ "normalized": false,
976
+ "rstrip": false,
977
+ "single_word": false,
978
+ "special": true
979
+ },
980
+ "128122": {
981
+ "content": "<|reserved_special_token_114|>",
982
+ "lstrip": false,
983
+ "normalized": false,
984
+ "rstrip": false,
985
+ "single_word": false,
986
+ "special": true
987
+ },
988
+ "128123": {
989
+ "content": "<|reserved_special_token_115|>",
990
+ "lstrip": false,
991
+ "normalized": false,
992
+ "rstrip": false,
993
+ "single_word": false,
994
+ "special": true
995
+ },
996
+ "128124": {
997
+ "content": "<|reserved_special_token_116|>",
998
+ "lstrip": false,
999
+ "normalized": false,
1000
+ "rstrip": false,
1001
+ "single_word": false,
1002
+ "special": true
1003
+ },
1004
+ "128125": {
1005
+ "content": "<|reserved_special_token_117|>",
1006
+ "lstrip": false,
1007
+ "normalized": false,
1008
+ "rstrip": false,
1009
+ "single_word": false,
1010
+ "special": true
1011
+ },
1012
+ "128126": {
1013
+ "content": "<|reserved_special_token_118|>",
1014
+ "lstrip": false,
1015
+ "normalized": false,
1016
+ "rstrip": false,
1017
+ "single_word": false,
1018
+ "special": true
1019
+ },
1020
+ "128127": {
1021
+ "content": "<|reserved_special_token_119|>",
1022
+ "lstrip": false,
1023
+ "normalized": false,
1024
+ "rstrip": false,
1025
+ "single_word": false,
1026
+ "special": true
1027
+ },
1028
+ "128128": {
1029
+ "content": "<|reserved_special_token_120|>",
1030
+ "lstrip": false,
1031
+ "normalized": false,
1032
+ "rstrip": false,
1033
+ "single_word": false,
1034
+ "special": true
1035
+ },
1036
+ "128129": {
1037
+ "content": "<|reserved_special_token_121|>",
1038
+ "lstrip": false,
1039
+ "normalized": false,
1040
+ "rstrip": false,
1041
+ "single_word": false,
1042
+ "special": true
1043
+ },
1044
+ "128130": {
1045
+ "content": "<|reserved_special_token_122|>",
1046
+ "lstrip": false,
1047
+ "normalized": false,
1048
+ "rstrip": false,
1049
+ "single_word": false,
1050
+ "special": true
1051
+ },
1052
+ "128131": {
1053
+ "content": "<|reserved_special_token_123|>",
1054
+ "lstrip": false,
1055
+ "normalized": false,
1056
+ "rstrip": false,
1057
+ "single_word": false,
1058
+ "special": true
1059
+ },
1060
+ "128132": {
1061
+ "content": "<|reserved_special_token_124|>",
1062
+ "lstrip": false,
1063
+ "normalized": false,
1064
+ "rstrip": false,
1065
+ "single_word": false,
1066
+ "special": true
1067
+ },
1068
+ "128133": {
1069
+ "content": "<|reserved_special_token_125|>",
1070
+ "lstrip": false,
1071
+ "normalized": false,
1072
+ "rstrip": false,
1073
+ "single_word": false,
1074
+ "special": true
1075
+ },
1076
+ "128134": {
1077
+ "content": "<|reserved_special_token_126|>",
1078
+ "lstrip": false,
1079
+ "normalized": false,
1080
+ "rstrip": false,
1081
+ "single_word": false,
1082
+ "special": true
1083
+ },
1084
+ "128135": {
1085
+ "content": "<|reserved_special_token_127|>",
1086
+ "lstrip": false,
1087
+ "normalized": false,
1088
+ "rstrip": false,
1089
+ "single_word": false,
1090
+ "special": true
1091
+ },
1092
+ "128136": {
1093
+ "content": "<|reserved_special_token_128|>",
1094
+ "lstrip": false,
1095
+ "normalized": false,
1096
+ "rstrip": false,
1097
+ "single_word": false,
1098
+ "special": true
1099
+ },
1100
+ "128137": {
1101
+ "content": "<|reserved_special_token_129|>",
1102
+ "lstrip": false,
1103
+ "normalized": false,
1104
+ "rstrip": false,
1105
+ "single_word": false,
1106
+ "special": true
1107
+ },
1108
+ "128138": {
1109
+ "content": "<|reserved_special_token_130|>",
1110
+ "lstrip": false,
1111
+ "normalized": false,
1112
+ "rstrip": false,
1113
+ "single_word": false,
1114
+ "special": true
1115
+ },
1116
+ "128139": {
1117
+ "content": "<|reserved_special_token_131|>",
1118
+ "lstrip": false,
1119
+ "normalized": false,
1120
+ "rstrip": false,
1121
+ "single_word": false,
1122
+ "special": true
1123
+ },
1124
+ "128140": {
1125
+ "content": "<|reserved_special_token_132|>",
1126
+ "lstrip": false,
1127
+ "normalized": false,
1128
+ "rstrip": false,
1129
+ "single_word": false,
1130
+ "special": true
1131
+ },
1132
+ "128141": {
1133
+ "content": "<|reserved_special_token_133|>",
1134
+ "lstrip": false,
1135
+ "normalized": false,
1136
+ "rstrip": false,
1137
+ "single_word": false,
1138
+ "special": true
1139
+ },
1140
+ "128142": {
1141
+ "content": "<|reserved_special_token_134|>",
1142
+ "lstrip": false,
1143
+ "normalized": false,
1144
+ "rstrip": false,
1145
+ "single_word": false,
1146
+ "special": true
1147
+ },
1148
+ "128143": {
1149
+ "content": "<|reserved_special_token_135|>",
1150
+ "lstrip": false,
1151
+ "normalized": false,
1152
+ "rstrip": false,
1153
+ "single_word": false,
1154
+ "special": true
1155
+ },
1156
+ "128144": {
1157
+ "content": "<|reserved_special_token_136|>",
1158
+ "lstrip": false,
1159
+ "normalized": false,
1160
+ "rstrip": false,
1161
+ "single_word": false,
1162
+ "special": true
1163
+ },
1164
+ "128145": {
1165
+ "content": "<|reserved_special_token_137|>",
1166
+ "lstrip": false,
1167
+ "normalized": false,
1168
+ "rstrip": false,
1169
+ "single_word": false,
1170
+ "special": true
1171
+ },
1172
+ "128146": {
1173
+ "content": "<|reserved_special_token_138|>",
1174
+ "lstrip": false,
1175
+ "normalized": false,
1176
+ "rstrip": false,
1177
+ "single_word": false,
1178
+ "special": true
1179
+ },
1180
+ "128147": {
1181
+ "content": "<|reserved_special_token_139|>",
1182
+ "lstrip": false,
1183
+ "normalized": false,
1184
+ "rstrip": false,
1185
+ "single_word": false,
1186
+ "special": true
1187
+ },
1188
+ "128148": {
1189
+ "content": "<|reserved_special_token_140|>",
1190
+ "lstrip": false,
1191
+ "normalized": false,
1192
+ "rstrip": false,
1193
+ "single_word": false,
1194
+ "special": true
1195
+ },
1196
+ "128149": {
1197
+ "content": "<|reserved_special_token_141|>",
1198
+ "lstrip": false,
1199
+ "normalized": false,
1200
+ "rstrip": false,
1201
+ "single_word": false,
1202
+ "special": true
1203
+ },
1204
+ "128150": {
1205
+ "content": "<|reserved_special_token_142|>",
1206
+ "lstrip": false,
1207
+ "normalized": false,
1208
+ "rstrip": false,
1209
+ "single_word": false,
1210
+ "special": true
1211
+ },
1212
+ "128151": {
1213
+ "content": "<|reserved_special_token_143|>",
1214
+ "lstrip": false,
1215
+ "normalized": false,
1216
+ "rstrip": false,
1217
+ "single_word": false,
1218
+ "special": true
1219
+ },
1220
+ "128152": {
1221
+ "content": "<|reserved_special_token_144|>",
1222
+ "lstrip": false,
1223
+ "normalized": false,
1224
+ "rstrip": false,
1225
+ "single_word": false,
1226
+ "special": true
1227
+ },
1228
+ "128153": {
1229
+ "content": "<|reserved_special_token_145|>",
1230
+ "lstrip": false,
1231
+ "normalized": false,
1232
+ "rstrip": false,
1233
+ "single_word": false,
1234
+ "special": true
1235
+ },
1236
+ "128154": {
1237
+ "content": "<|reserved_special_token_146|>",
1238
+ "lstrip": false,
1239
+ "normalized": false,
1240
+ "rstrip": false,
1241
+ "single_word": false,
1242
+ "special": true
1243
+ },
1244
+ "128155": {
1245
+ "content": "<|reserved_special_token_147|>",
1246
+ "lstrip": false,
1247
+ "normalized": false,
1248
+ "rstrip": false,
1249
+ "single_word": false,
1250
+ "special": true
1251
+ },
1252
+ "128156": {
1253
+ "content": "<|reserved_special_token_148|>",
1254
+ "lstrip": false,
1255
+ "normalized": false,
1256
+ "rstrip": false,
1257
+ "single_word": false,
1258
+ "special": true
1259
+ },
1260
+ "128157": {
1261
+ "content": "<|reserved_special_token_149|>",
1262
+ "lstrip": false,
1263
+ "normalized": false,
1264
+ "rstrip": false,
1265
+ "single_word": false,
1266
+ "special": true
1267
+ },
1268
+ "128158": {
1269
+ "content": "<|reserved_special_token_150|>",
1270
+ "lstrip": false,
1271
+ "normalized": false,
1272
+ "rstrip": false,
1273
+ "single_word": false,
1274
+ "special": true
1275
+ },
1276
+ "128159": {
1277
+ "content": "<|reserved_special_token_151|>",
1278
+ "lstrip": false,
1279
+ "normalized": false,
1280
+ "rstrip": false,
1281
+ "single_word": false,
1282
+ "special": true
1283
+ },
1284
+ "128160": {
1285
+ "content": "<|reserved_special_token_152|>",
1286
+ "lstrip": false,
1287
+ "normalized": false,
1288
+ "rstrip": false,
1289
+ "single_word": false,
1290
+ "special": true
1291
+ },
1292
+ "128161": {
1293
+ "content": "<|reserved_special_token_153|>",
1294
+ "lstrip": false,
1295
+ "normalized": false,
1296
+ "rstrip": false,
1297
+ "single_word": false,
1298
+ "special": true
1299
+ },
1300
+ "128162": {
1301
+ "content": "<|reserved_special_token_154|>",
1302
+ "lstrip": false,
1303
+ "normalized": false,
1304
+ "rstrip": false,
1305
+ "single_word": false,
1306
+ "special": true
1307
+ },
1308
+ "128163": {
1309
+ "content": "<|reserved_special_token_155|>",
1310
+ "lstrip": false,
1311
+ "normalized": false,
1312
+ "rstrip": false,
1313
+ "single_word": false,
1314
+ "special": true
1315
+ },
1316
+ "128164": {
1317
+ "content": "<|reserved_special_token_156|>",
1318
+ "lstrip": false,
1319
+ "normalized": false,
1320
+ "rstrip": false,
1321
+ "single_word": false,
1322
+ "special": true
1323
+ },
1324
+ "128165": {
1325
+ "content": "<|reserved_special_token_157|>",
1326
+ "lstrip": false,
1327
+ "normalized": false,
1328
+ "rstrip": false,
1329
+ "single_word": false,
1330
+ "special": true
1331
+ },
1332
+ "128166": {
1333
+ "content": "<|reserved_special_token_158|>",
1334
+ "lstrip": false,
1335
+ "normalized": false,
1336
+ "rstrip": false,
1337
+ "single_word": false,
1338
+ "special": true
1339
+ },
1340
+ "128167": {
1341
+ "content": "<|reserved_special_token_159|>",
1342
+ "lstrip": false,
1343
+ "normalized": false,
1344
+ "rstrip": false,
1345
+ "single_word": false,
1346
+ "special": true
1347
+ },
1348
+ "128168": {
1349
+ "content": "<|reserved_special_token_160|>",
1350
+ "lstrip": false,
1351
+ "normalized": false,
1352
+ "rstrip": false,
1353
+ "single_word": false,
1354
+ "special": true
1355
+ },
1356
+ "128169": {
1357
+ "content": "<|reserved_special_token_161|>",
1358
+ "lstrip": false,
1359
+ "normalized": false,
1360
+ "rstrip": false,
1361
+ "single_word": false,
1362
+ "special": true
1363
+ },
1364
+ "128170": {
1365
+ "content": "<|reserved_special_token_162|>",
1366
+ "lstrip": false,
1367
+ "normalized": false,
1368
+ "rstrip": false,
1369
+ "single_word": false,
1370
+ "special": true
1371
+ },
1372
+ "128171": {
1373
+ "content": "<|reserved_special_token_163|>",
1374
+ "lstrip": false,
1375
+ "normalized": false,
1376
+ "rstrip": false,
1377
+ "single_word": false,
1378
+ "special": true
1379
+ },
1380
+ "128172": {
1381
+ "content": "<|reserved_special_token_164|>",
1382
+ "lstrip": false,
1383
+ "normalized": false,
1384
+ "rstrip": false,
1385
+ "single_word": false,
1386
+ "special": true
1387
+ },
1388
+ "128173": {
1389
+ "content": "<|reserved_special_token_165|>",
1390
+ "lstrip": false,
1391
+ "normalized": false,
1392
+ "rstrip": false,
1393
+ "single_word": false,
1394
+ "special": true
1395
+ },
1396
+ "128174": {
1397
+ "content": "<|reserved_special_token_166|>",
1398
+ "lstrip": false,
1399
+ "normalized": false,
1400
+ "rstrip": false,
1401
+ "single_word": false,
1402
+ "special": true
1403
+ },
1404
+ "128175": {
1405
+ "content": "<|reserved_special_token_167|>",
1406
+ "lstrip": false,
1407
+ "normalized": false,
1408
+ "rstrip": false,
1409
+ "single_word": false,
1410
+ "special": true
1411
+ },
1412
+ "128176": {
1413
+ "content": "<|reserved_special_token_168|>",
1414
+ "lstrip": false,
1415
+ "normalized": false,
1416
+ "rstrip": false,
1417
+ "single_word": false,
1418
+ "special": true
1419
+ },
1420
+ "128177": {
1421
+ "content": "<|reserved_special_token_169|>",
1422
+ "lstrip": false,
1423
+ "normalized": false,
1424
+ "rstrip": false,
1425
+ "single_word": false,
1426
+ "special": true
1427
+ },
1428
+ "128178": {
1429
+ "content": "<|reserved_special_token_170|>",
1430
+ "lstrip": false,
1431
+ "normalized": false,
1432
+ "rstrip": false,
1433
+ "single_word": false,
1434
+ "special": true
1435
+ },
1436
+ "128179": {
1437
+ "content": "<|reserved_special_token_171|>",
1438
+ "lstrip": false,
1439
+ "normalized": false,
1440
+ "rstrip": false,
1441
+ "single_word": false,
1442
+ "special": true
1443
+ },
1444
+ "128180": {
1445
+ "content": "<|reserved_special_token_172|>",
1446
+ "lstrip": false,
1447
+ "normalized": false,
1448
+ "rstrip": false,
1449
+ "single_word": false,
1450
+ "special": true
1451
+ },
1452
+ "128181": {
1453
+ "content": "<|reserved_special_token_173|>",
1454
+ "lstrip": false,
1455
+ "normalized": false,
1456
+ "rstrip": false,
1457
+ "single_word": false,
1458
+ "special": true
1459
+ },
1460
+ "128182": {
1461
+ "content": "<|reserved_special_token_174|>",
1462
+ "lstrip": false,
1463
+ "normalized": false,
1464
+ "rstrip": false,
1465
+ "single_word": false,
1466
+ "special": true
1467
+ },
1468
+ "128183": {
1469
+ "content": "<|reserved_special_token_175|>",
1470
+ "lstrip": false,
1471
+ "normalized": false,
1472
+ "rstrip": false,
1473
+ "single_word": false,
1474
+ "special": true
1475
+ },
1476
+ "128184": {
1477
+ "content": "<|reserved_special_token_176|>",
1478
+ "lstrip": false,
1479
+ "normalized": false,
1480
+ "rstrip": false,
1481
+ "single_word": false,
1482
+ "special": true
1483
+ },
1484
+ "128185": {
1485
+ "content": "<|reserved_special_token_177|>",
1486
+ "lstrip": false,
1487
+ "normalized": false,
1488
+ "rstrip": false,
1489
+ "single_word": false,
1490
+ "special": true
1491
+ },
1492
+ "128186": {
1493
+ "content": "<|reserved_special_token_178|>",
1494
+ "lstrip": false,
1495
+ "normalized": false,
1496
+ "rstrip": false,
1497
+ "single_word": false,
1498
+ "special": true
1499
+ },
1500
+ "128187": {
1501
+ "content": "<|reserved_special_token_179|>",
1502
+ "lstrip": false,
1503
+ "normalized": false,
1504
+ "rstrip": false,
1505
+ "single_word": false,
1506
+ "special": true
1507
+ },
1508
+ "128188": {
1509
+ "content": "<|reserved_special_token_180|>",
1510
+ "lstrip": false,
1511
+ "normalized": false,
1512
+ "rstrip": false,
1513
+ "single_word": false,
1514
+ "special": true
1515
+ },
1516
+ "128189": {
1517
+ "content": "<|reserved_special_token_181|>",
1518
+ "lstrip": false,
1519
+ "normalized": false,
1520
+ "rstrip": false,
1521
+ "single_word": false,
1522
+ "special": true
1523
+ },
1524
+ "128190": {
1525
+ "content": "<|reserved_special_token_182|>",
1526
+ "lstrip": false,
1527
+ "normalized": false,
1528
+ "rstrip": false,
1529
+ "single_word": false,
1530
+ "special": true
1531
+ },
1532
+ "128191": {
1533
+ "content": "<|reserved_special_token_183|>",
1534
+ "lstrip": false,
1535
+ "normalized": false,
1536
+ "rstrip": false,
1537
+ "single_word": false,
1538
+ "special": true
1539
+ },
1540
+ "128192": {
1541
+ "content": "<|reserved_special_token_184|>",
1542
+ "lstrip": false,
1543
+ "normalized": false,
1544
+ "rstrip": false,
1545
+ "single_word": false,
1546
+ "special": true
1547
+ },
1548
+ "128193": {
1549
+ "content": "<|reserved_special_token_185|>",
1550
+ "lstrip": false,
1551
+ "normalized": false,
1552
+ "rstrip": false,
1553
+ "single_word": false,
1554
+ "special": true
1555
+ },
1556
+ "128194": {
1557
+ "content": "<|reserved_special_token_186|>",
1558
+ "lstrip": false,
1559
+ "normalized": false,
1560
+ "rstrip": false,
1561
+ "single_word": false,
1562
+ "special": true
1563
+ },
1564
+ "128195": {
1565
+ "content": "<|reserved_special_token_187|>",
1566
+ "lstrip": false,
1567
+ "normalized": false,
1568
+ "rstrip": false,
1569
+ "single_word": false,
1570
+ "special": true
1571
+ },
1572
+ "128196": {
1573
+ "content": "<|reserved_special_token_188|>",
1574
+ "lstrip": false,
1575
+ "normalized": false,
1576
+ "rstrip": false,
1577
+ "single_word": false,
1578
+ "special": true
1579
+ },
1580
+ "128197": {
1581
+ "content": "<|reserved_special_token_189|>",
1582
+ "lstrip": false,
1583
+ "normalized": false,
1584
+ "rstrip": false,
1585
+ "single_word": false,
1586
+ "special": true
1587
+ },
1588
+ "128198": {
1589
+ "content": "<|reserved_special_token_190|>",
1590
+ "lstrip": false,
1591
+ "normalized": false,
1592
+ "rstrip": false,
1593
+ "single_word": false,
1594
+ "special": true
1595
+ },
1596
+ "128199": {
1597
+ "content": "<|reserved_special_token_191|>",
1598
+ "lstrip": false,
1599
+ "normalized": false,
1600
+ "rstrip": false,
1601
+ "single_word": false,
1602
+ "special": true
1603
+ },
1604
+ "128200": {
1605
+ "content": "<|reserved_special_token_192|>",
1606
+ "lstrip": false,
1607
+ "normalized": false,
1608
+ "rstrip": false,
1609
+ "single_word": false,
1610
+ "special": true
1611
+ },
1612
+ "128201": {
1613
+ "content": "<|reserved_special_token_193|>",
1614
+ "lstrip": false,
1615
+ "normalized": false,
1616
+ "rstrip": false,
1617
+ "single_word": false,
1618
+ "special": true
1619
+ },
1620
+ "128202": {
1621
+ "content": "<|reserved_special_token_194|>",
1622
+ "lstrip": false,
1623
+ "normalized": false,
1624
+ "rstrip": false,
1625
+ "single_word": false,
1626
+ "special": true
1627
+ },
1628
+ "128203": {
1629
+ "content": "<|reserved_special_token_195|>",
1630
+ "lstrip": false,
1631
+ "normalized": false,
1632
+ "rstrip": false,
1633
+ "single_word": false,
1634
+ "special": true
1635
+ },
1636
+ "128204": {
1637
+ "content": "<|reserved_special_token_196|>",
1638
+ "lstrip": false,
1639
+ "normalized": false,
1640
+ "rstrip": false,
1641
+ "single_word": false,
1642
+ "special": true
1643
+ },
1644
+ "128205": {
1645
+ "content": "<|reserved_special_token_197|>",
1646
+ "lstrip": false,
1647
+ "normalized": false,
1648
+ "rstrip": false,
1649
+ "single_word": false,
1650
+ "special": true
1651
+ },
1652
+ "128206": {
1653
+ "content": "<|reserved_special_token_198|>",
1654
+ "lstrip": false,
1655
+ "normalized": false,
1656
+ "rstrip": false,
1657
+ "single_word": false,
1658
+ "special": true
1659
+ },
1660
+ "128207": {
1661
+ "content": "<|reserved_special_token_199|>",
1662
+ "lstrip": false,
1663
+ "normalized": false,
1664
+ "rstrip": false,
1665
+ "single_word": false,
1666
+ "special": true
1667
+ },
1668
+ "128208": {
1669
+ "content": "<|reserved_special_token_200|>",
1670
+ "lstrip": false,
1671
+ "normalized": false,
1672
+ "rstrip": false,
1673
+ "single_word": false,
1674
+ "special": true
1675
+ },
1676
+ "128209": {
1677
+ "content": "<|reserved_special_token_201|>",
1678
+ "lstrip": false,
1679
+ "normalized": false,
1680
+ "rstrip": false,
1681
+ "single_word": false,
1682
+ "special": true
1683
+ },
1684
+ "128210": {
1685
+ "content": "<|reserved_special_token_202|>",
1686
+ "lstrip": false,
1687
+ "normalized": false,
1688
+ "rstrip": false,
1689
+ "single_word": false,
1690
+ "special": true
1691
+ },
1692
+ "128211": {
1693
+ "content": "<|reserved_special_token_203|>",
1694
+ "lstrip": false,
1695
+ "normalized": false,
1696
+ "rstrip": false,
1697
+ "single_word": false,
1698
+ "special": true
1699
+ },
1700
+ "128212": {
1701
+ "content": "<|reserved_special_token_204|>",
1702
+ "lstrip": false,
1703
+ "normalized": false,
1704
+ "rstrip": false,
1705
+ "single_word": false,
1706
+ "special": true
1707
+ },
1708
+ "128213": {
1709
+ "content": "<|reserved_special_token_205|>",
1710
+ "lstrip": false,
1711
+ "normalized": false,
1712
+ "rstrip": false,
1713
+ "single_word": false,
1714
+ "special": true
1715
+ },
1716
+ "128214": {
1717
+ "content": "<|reserved_special_token_206|>",
1718
+ "lstrip": false,
1719
+ "normalized": false,
1720
+ "rstrip": false,
1721
+ "single_word": false,
1722
+ "special": true
1723
+ },
1724
+ "128215": {
1725
+ "content": "<|reserved_special_token_207|>",
1726
+ "lstrip": false,
1727
+ "normalized": false,
1728
+ "rstrip": false,
1729
+ "single_word": false,
1730
+ "special": true
1731
+ },
1732
+ "128216": {
1733
+ "content": "<|reserved_special_token_208|>",
1734
+ "lstrip": false,
1735
+ "normalized": false,
1736
+ "rstrip": false,
1737
+ "single_word": false,
1738
+ "special": true
1739
+ },
1740
+ "128217": {
1741
+ "content": "<|reserved_special_token_209|>",
1742
+ "lstrip": false,
1743
+ "normalized": false,
1744
+ "rstrip": false,
1745
+ "single_word": false,
1746
+ "special": true
1747
+ },
1748
+ "128218": {
1749
+ "content": "<|reserved_special_token_210|>",
1750
+ "lstrip": false,
1751
+ "normalized": false,
1752
+ "rstrip": false,
1753
+ "single_word": false,
1754
+ "special": true
1755
+ },
1756
+ "128219": {
1757
+ "content": "<|reserved_special_token_211|>",
1758
+ "lstrip": false,
1759
+ "normalized": false,
1760
+ "rstrip": false,
1761
+ "single_word": false,
1762
+ "special": true
1763
+ },
1764
+ "128220": {
1765
+ "content": "<|reserved_special_token_212|>",
1766
+ "lstrip": false,
1767
+ "normalized": false,
1768
+ "rstrip": false,
1769
+ "single_word": false,
1770
+ "special": true
1771
+ },
1772
+ "128221": {
1773
+ "content": "<|reserved_special_token_213|>",
1774
+ "lstrip": false,
1775
+ "normalized": false,
1776
+ "rstrip": false,
1777
+ "single_word": false,
1778
+ "special": true
1779
+ },
1780
+ "128222": {
1781
+ "content": "<|reserved_special_token_214|>",
1782
+ "lstrip": false,
1783
+ "normalized": false,
1784
+ "rstrip": false,
1785
+ "single_word": false,
1786
+ "special": true
1787
+ },
1788
+ "128223": {
1789
+ "content": "<|reserved_special_token_215|>",
1790
+ "lstrip": false,
1791
+ "normalized": false,
1792
+ "rstrip": false,
1793
+ "single_word": false,
1794
+ "special": true
1795
+ },
1796
+ "128224": {
1797
+ "content": "<|reserved_special_token_216|>",
1798
+ "lstrip": false,
1799
+ "normalized": false,
1800
+ "rstrip": false,
1801
+ "single_word": false,
1802
+ "special": true
1803
+ },
1804
+ "128225": {
1805
+ "content": "<|reserved_special_token_217|>",
1806
+ "lstrip": false,
1807
+ "normalized": false,
1808
+ "rstrip": false,
1809
+ "single_word": false,
1810
+ "special": true
1811
+ },
1812
+ "128226": {
1813
+ "content": "<|reserved_special_token_218|>",
1814
+ "lstrip": false,
1815
+ "normalized": false,
1816
+ "rstrip": false,
1817
+ "single_word": false,
1818
+ "special": true
1819
+ },
1820
+ "128227": {
1821
+ "content": "<|reserved_special_token_219|>",
1822
+ "lstrip": false,
1823
+ "normalized": false,
1824
+ "rstrip": false,
1825
+ "single_word": false,
1826
+ "special": true
1827
+ },
1828
+ "128228": {
1829
+ "content": "<|reserved_special_token_220|>",
1830
+ "lstrip": false,
1831
+ "normalized": false,
1832
+ "rstrip": false,
1833
+ "single_word": false,
1834
+ "special": true
1835
+ },
1836
+ "128229": {
1837
+ "content": "<|reserved_special_token_221|>",
1838
+ "lstrip": false,
1839
+ "normalized": false,
1840
+ "rstrip": false,
1841
+ "single_word": false,
1842
+ "special": true
1843
+ },
1844
+ "128230": {
1845
+ "content": "<|reserved_special_token_222|>",
1846
+ "lstrip": false,
1847
+ "normalized": false,
1848
+ "rstrip": false,
1849
+ "single_word": false,
1850
+ "special": true
1851
+ },
1852
+ "128231": {
1853
+ "content": "<|reserved_special_token_223|>",
1854
+ "lstrip": false,
1855
+ "normalized": false,
1856
+ "rstrip": false,
1857
+ "single_word": false,
1858
+ "special": true
1859
+ },
1860
+ "128232": {
1861
+ "content": "<|reserved_special_token_224|>",
1862
+ "lstrip": false,
1863
+ "normalized": false,
1864
+ "rstrip": false,
1865
+ "single_word": false,
1866
+ "special": true
1867
+ },
1868
+ "128233": {
1869
+ "content": "<|reserved_special_token_225|>",
1870
+ "lstrip": false,
1871
+ "normalized": false,
1872
+ "rstrip": false,
1873
+ "single_word": false,
1874
+ "special": true
1875
+ },
1876
+ "128234": {
1877
+ "content": "<|reserved_special_token_226|>",
1878
+ "lstrip": false,
1879
+ "normalized": false,
1880
+ "rstrip": false,
1881
+ "single_word": false,
1882
+ "special": true
1883
+ },
1884
+ "128235": {
1885
+ "content": "<|reserved_special_token_227|>",
1886
+ "lstrip": false,
1887
+ "normalized": false,
1888
+ "rstrip": false,
1889
+ "single_word": false,
1890
+ "special": true
1891
+ },
1892
+ "128236": {
1893
+ "content": "<|reserved_special_token_228|>",
1894
+ "lstrip": false,
1895
+ "normalized": false,
1896
+ "rstrip": false,
1897
+ "single_word": false,
1898
+ "special": true
1899
+ },
1900
+ "128237": {
1901
+ "content": "<|reserved_special_token_229|>",
1902
+ "lstrip": false,
1903
+ "normalized": false,
1904
+ "rstrip": false,
1905
+ "single_word": false,
1906
+ "special": true
1907
+ },
1908
+ "128238": {
1909
+ "content": "<|reserved_special_token_230|>",
1910
+ "lstrip": false,
1911
+ "normalized": false,
1912
+ "rstrip": false,
1913
+ "single_word": false,
1914
+ "special": true
1915
+ },
1916
+ "128239": {
1917
+ "content": "<|reserved_special_token_231|>",
1918
+ "lstrip": false,
1919
+ "normalized": false,
1920
+ "rstrip": false,
1921
+ "single_word": false,
1922
+ "special": true
1923
+ },
1924
+ "128240": {
1925
+ "content": "<|reserved_special_token_232|>",
1926
+ "lstrip": false,
1927
+ "normalized": false,
1928
+ "rstrip": false,
1929
+ "single_word": false,
1930
+ "special": true
1931
+ },
1932
+ "128241": {
1933
+ "content": "<|reserved_special_token_233|>",
1934
+ "lstrip": false,
1935
+ "normalized": false,
1936
+ "rstrip": false,
1937
+ "single_word": false,
1938
+ "special": true
1939
+ },
1940
+ "128242": {
1941
+ "content": "<|reserved_special_token_234|>",
1942
+ "lstrip": false,
1943
+ "normalized": false,
1944
+ "rstrip": false,
1945
+ "single_word": false,
1946
+ "special": true
1947
+ },
1948
+ "128243": {
1949
+ "content": "<|reserved_special_token_235|>",
1950
+ "lstrip": false,
1951
+ "normalized": false,
1952
+ "rstrip": false,
1953
+ "single_word": false,
1954
+ "special": true
1955
+ },
1956
+ "128244": {
1957
+ "content": "<|reserved_special_token_236|>",
1958
+ "lstrip": false,
1959
+ "normalized": false,
1960
+ "rstrip": false,
1961
+ "single_word": false,
1962
+ "special": true
1963
+ },
1964
+ "128245": {
1965
+ "content": "<|reserved_special_token_237|>",
1966
+ "lstrip": false,
1967
+ "normalized": false,
1968
+ "rstrip": false,
1969
+ "single_word": false,
1970
+ "special": true
1971
+ },
1972
+ "128246": {
1973
+ "content": "<|reserved_special_token_238|>",
1974
+ "lstrip": false,
1975
+ "normalized": false,
1976
+ "rstrip": false,
1977
+ "single_word": false,
1978
+ "special": true
1979
+ },
1980
+ "128247": {
1981
+ "content": "<|reserved_special_token_239|>",
1982
+ "lstrip": false,
1983
+ "normalized": false,
1984
+ "rstrip": false,
1985
+ "single_word": false,
1986
+ "special": true
1987
+ },
1988
+ "128248": {
1989
+ "content": "<|reserved_special_token_240|>",
1990
+ "lstrip": false,
1991
+ "normalized": false,
1992
+ "rstrip": false,
1993
+ "single_word": false,
1994
+ "special": true
1995
+ },
1996
+ "128249": {
1997
+ "content": "<|reserved_special_token_241|>",
1998
+ "lstrip": false,
1999
+ "normalized": false,
2000
+ "rstrip": false,
2001
+ "single_word": false,
2002
+ "special": true
2003
+ },
2004
+ "128250": {
2005
+ "content": "<|reserved_special_token_242|>",
2006
+ "lstrip": false,
2007
+ "normalized": false,
2008
+ "rstrip": false,
2009
+ "single_word": false,
2010
+ "special": true
2011
+ },
2012
+ "128251": {
2013
+ "content": "<|reserved_special_token_243|>",
2014
+ "lstrip": false,
2015
+ "normalized": false,
2016
+ "rstrip": false,
2017
+ "single_word": false,
2018
+ "special": true
2019
+ },
2020
+ "128252": {
2021
+ "content": "<|reserved_special_token_244|>",
2022
+ "lstrip": false,
2023
+ "normalized": false,
2024
+ "rstrip": false,
2025
+ "single_word": false,
2026
+ "special": true
2027
+ },
2028
+ "128253": {
2029
+ "content": "<|reserved_special_token_245|>",
2030
+ "lstrip": false,
2031
+ "normalized": false,
2032
+ "rstrip": false,
2033
+ "single_word": false,
2034
+ "special": true
2035
+ },
2036
+ "128254": {
2037
+ "content": "<|reserved_special_token_246|>",
2038
+ "lstrip": false,
2039
+ "normalized": false,
2040
+ "rstrip": false,
2041
+ "single_word": false,
2042
+ "special": true
2043
+ },
2044
+ "128255": {
2045
+ "content": "<|reserved_special_token_247|>",
2046
+ "lstrip": false,
2047
+ "normalized": false,
2048
+ "rstrip": false,
2049
+ "single_word": false,
2050
+ "special": true
2051
+ }
2052
+ },
2053
+ "bos_token": "<|begin_of_text|>",
2054
+ "chat_template": [
2055
+ {
2056
+ "name": "default",
2057
+ "template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"
2058
+ },
2059
+ {
2060
+ "name": "tool_use",
2061
+ "template": "{%- macro json_to_python_type(json_spec) %}\n{%- set basic_type_map = {\n \"string\": \"str\",\n \"number\": \"float\",\n \"integer\": \"int\",\n \"boolean\": \"bool\"\n} %}\n\n{%- if basic_type_map[json_spec.type] is defined %}\n {{- basic_type_map[json_spec.type] }}\n{%- elif json_spec.type == \"array\" %}\n {{- \"list[\" + json_to_python_type(json_spec|items) + \"]\"}}\n{%- elif json_spec.type == \"object\" %}\n {%- if json_spec.additionalProperties is defined %}\n {{- \"dict[str, \" + json_to_python_type(json_spec.additionalProperties) + ']'}}\n {%- else %}\n {{- \"dict\" }}\n {%- endif %}\n{%- elif json_spec.type is iterable %}\n {{- \"Union[\" }}\n {%- for t in json_spec.type %}\n {{- json_to_python_type({\"type\": t}) }}\n {%- if not loop.last %}\n {{- \",\" }} \n {%- endif %}\n {%- endfor %}\n {{- \"]\" }}\n{%- else %}\n {{- \"Any\" }}\n{%- endif %}\n{%- endmacro %}\n\n\n{{- bos_token }}\n{{- '<|im_start|>system\n' }}\n{{- \"You are a function calling AI model. You are provided with function signatures within <tools></tools> XML tags. You may call one or more functions to assist with the user query. Don't make assumptions about what values to plug into functions. Here are the available tools: <tools> \" }}\n{%- for tool in tools %}\n {%- if tool.function is defined %}\n {%- set tool = tool.function %}\n {%- endif %}\n {{- '{\"type\": \"function\", \"function\": ' }}\n {{- '{\"name\": \"' + tool.name + '\", ' }}\n {{- '\"description\": \"' + tool.name + '(' }}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {{- param_name + \": \" + json_to_python_type(param_fields) }}\n {%- if not loop.last %}\n {{- \", \" }}\n {%- endif %}\n {%- endfor %}\n {{- \")\" }}\n {%- if tool.return is defined %}\n {{- \" -> \" + json_to_python_type(tool.return) }}\n {%- endif %}\n {{- \" - \" + tool.description + \"\n\n\" }}\n {%- for param_name, param_fields in tool.parameters.properties|items %}\n {%- if loop.first %}\n {{- \" Args:\n\" }}\n {%- endif %}\n {{- \" \" + param_name + \"(\" + json_to_python_type(param_fields) + \"): \" + param_fields.description|trim }}\n {%- endfor %}\n {%- if tool.return is defined and tool.return.description is defined %}\n {{- \"\n Returns:\n \" + tool.return.description }}\n {%- endif %}\n {{- '\"' }}\n {{- ', \"parameters\": ' }}\n {%- if tool.parameters.properties | length == 0 %}\n {{- \"{}\" }}\n {%- else %}\n {{- tool.parameters|tojson }}\n {%- endif %}\n {{- \"}\" }}\n {%- if not loop.last %}\n {{- \"\n\" }}\n {%- endif %}\n{%- endfor %}\n{{- \" </tools>\" }}\n{{- 'Use the following pydantic model json schema for each tool call you will make: {\"properties\": {\"name\": {\"title\": \"Name\", \"type\": \"string\"}, \"arguments\": {\"title\": \"Arguments\", \"type\": \"object\"}}, \"required\": [\"name\", \"arguments\"], \"title\": \"FunctionCall\", \"type\": \"object\"}}\n' }}\n{{- \"For each function call return a json object with function name and arguments within <tool_call></tool_call> XML tags as follows:\n\" }}\n{{- \"<tool_call>\n\" }}\n{{- '{\"name\": <function-name>, \"arguments\": <args-dict>}\n' }}\n{{- '</tool_call><|im_end|>\n' }}\n{%- for message in messages %}\n {%- if message.role == \"user\" or message.role == \"system\" or (message.role == \"assistant\" and message.tool_calls is not defined) %}\n {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}\n {%- elif message.role == \"assistant\" %}\n {{- '<|im_start|>' + message.role }}\n {%- for tool_call in message.tool_calls 
%}\n {{- '\n<tool_call>\n' }} {%- if tool_call.function is defined %}\n {%- set tool_call = tool_call.function %}\n {%- endif %}\n {{- '{' }}\n {{- '\"name\": \"' }}\n {{- tool_call.name }}\n {{- '\"' }}\n {{- ', '}}\n {%- if tool_call.arguments is defined %}\n {{- '\"arguments\": ' }}\n {%- if tool_call.arguments is string %}\n {{- tool_call.arguments }}\n {%- else %}\n {{- tool_call.arguments|tojson }}\n {%- endif %}\n {%- endif %}\n {{- '}' }}\n {{- '\n</tool_call>' }}\n {%- endfor %}\n {{- '<|im_end|>\n' }}\n {%- elif message.role == \"tool\" %}\n {%- if loop.previtem and loop.previtem.role != \"tool\" %}\n {{- '<|im_start|>tool\n' }}\n {%- endif %}\n {{- '<tool_response>\n' }}\n {{- message.content }}\n {%- if not loop.last %}\n {{- '\n</tool_response>\n' }}\n {%- else %}\n {{- '\n</tool_response>' }}\n {%- endif %}\n {%- if not loop.last and loop.nextitem.role != \"tool\" %}\n {{- '<|im_end|>' }}\n {%- elif loop.last %}\n {{- '<|im_end|>' }}\n {%- endif %}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|im_start|>assistant\n' }}\n{%- endif %}\n"
+ }
+ ],
+
+ "clean_up_tokenization_spaces": true,
+ "eos_token": "<|eot_id|>",
+ "extra_special_tokens": {},
+ "model_input_names": [
+ "input_ids",
+ "attention_mask"
+ ],
+ "model_max_length": 131072,
+ "pad_token": "<|end_of_text|>",
+ "padding_side": "left",
+ "tokenizer_class": "PreTrainedTokenizerFast",
+ "unk_token": null
+ }
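
For reference, a minimal sketch of how the tokenizer configuration added above is typically consumed. This is an illustration, not part of the commit: the local path is a placeholder, and the snippet assumes the Hugging Face `transformers` package, whose `apply_chat_template` selects the "default" template from the `chat_template` list defined above.

# Minimal sketch (assumption: this repo has been downloaded to a local path).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")  # placeholder path

messages = [{"role": "user", "content": "Hello!"}]

# add_generation_prompt=True appends '<|start_header_id|>assistant<|end_header_id|>\n\n',
# matching the template's final branch; the rendered prompt starts with the
# bos_token "<|begin_of_text|>" and each turn ends with the eos_token "<|eot_id|>".
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)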