Commit f54e9b6 by legraphista
Parent(s): bfdd77a
Upload imatrix.log with huggingface_hub

Files changed: imatrix.log (+153 -0)
imatrix.log
ADDED
@@ -0,0 +1,153 @@
llama_model_loader: loaded meta data with 33 key-value pairs and 288 tensors from gemma-2-2b-IMat-GGUF/gemma-2-2b.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest))
llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
llama_model_loader: - kv 0: general.architecture str = gemma2
llama_model_loader: - kv 1: general.type str = model
llama_model_loader: - kv 2: general.name str = Gemma 2 2b
llama_model_loader: - kv 3: general.basename str = gemma-2
llama_model_loader: - kv 4: general.size_label str = 2B
llama_model_loader: - kv 5: general.license str = gemma
llama_model_loader: - kv 6: general.tags arr[str,1] = ["text-generation"]
llama_model_loader: - kv 7: gemma2.context_length u32 = 8192
llama_model_loader: - kv 8: gemma2.embedding_length u32 = 2304
llama_model_loader: - kv 9: gemma2.block_count u32 = 26
llama_model_loader: - kv 10: gemma2.feed_forward_length u32 = 9216
llama_model_loader: - kv 11: gemma2.attention.head_count u32 = 8
llama_model_loader: - kv 12: gemma2.attention.head_count_kv u32 = 4
llama_model_loader: - kv 13: gemma2.attention.layer_norm_rms_epsilon f32 = 0.000001
llama_model_loader: - kv 14: gemma2.attention.key_length u32 = 256
llama_model_loader: - kv 15: gemma2.attention.value_length u32 = 256
llama_model_loader: - kv 16: general.file_type u32 = 7
llama_model_loader: - kv 17: gemma2.attn_logit_softcapping f32 = 50.000000
llama_model_loader: - kv 18: gemma2.final_logit_softcapping f32 = 30.000000
llama_model_loader: - kv 19: gemma2.attention.sliding_window u32 = 4096
llama_model_loader: - kv 20: tokenizer.ggml.model str = llama
llama_model_loader: - kv 21: tokenizer.ggml.pre str = default
llama_model_loader: - kv 22: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
llama_model_loader: - kv 23: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
llama_model_loader: - kv 24: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, ...
llama_model_loader: - kv 25: tokenizer.ggml.bos_token_id u32 = 2
llama_model_loader: - kv 26: tokenizer.ggml.eos_token_id u32 = 1
llama_model_loader: - kv 27: tokenizer.ggml.unknown_token_id u32 = 3
llama_model_loader: - kv 28: tokenizer.ggml.padding_token_id u32 = 0
llama_model_loader: - kv 29: tokenizer.ggml.add_bos_token bool = true
llama_model_loader: - kv 30: tokenizer.ggml.add_eos_token bool = false
llama_model_loader: - kv 31: tokenizer.ggml.add_space_prefix bool = false
llama_model_loader: - kv 32: general.quantization_version u32 = 2
llama_model_loader: - type f32: 105 tensors
llama_model_loader: - type q8_0: 183 tensors
llm_load_vocab: special tokens cache size = 249
llm_load_vocab: token to piece cache size = 1.6014 MB
llm_load_print_meta: format = GGUF V3 (latest)
llm_load_print_meta: arch = gemma2
llm_load_print_meta: vocab type = SPM
llm_load_print_meta: n_vocab = 256000
llm_load_print_meta: n_merges = 0
llm_load_print_meta: vocab_only = 0
llm_load_print_meta: n_ctx_train = 8192
llm_load_print_meta: n_embd = 2304
llm_load_print_meta: n_layer = 26
llm_load_print_meta: n_head = 8
llm_load_print_meta: n_head_kv = 4
llm_load_print_meta: n_rot = 256
llm_load_print_meta: n_swa = 4096
llm_load_print_meta: n_embd_head_k = 256
llm_load_print_meta: n_embd_head_v = 256
llm_load_print_meta: n_gqa = 2
llm_load_print_meta: n_embd_k_gqa = 1024
llm_load_print_meta: n_embd_v_gqa = 1024
llm_load_print_meta: f_norm_eps = 0.0e+00
llm_load_print_meta: f_norm_rms_eps = 1.0e-06
llm_load_print_meta: f_clamp_kqv = 0.0e+00
llm_load_print_meta: f_max_alibi_bias = 0.0e+00
llm_load_print_meta: f_logit_scale = 0.0e+00
llm_load_print_meta: n_ff = 9216
llm_load_print_meta: n_expert = 0
llm_load_print_meta: n_expert_used = 0
llm_load_print_meta: causal attn = 1
llm_load_print_meta: pooling type = 0
llm_load_print_meta: rope type = 2
llm_load_print_meta: rope scaling = linear
llm_load_print_meta: freq_base_train = 10000.0
llm_load_print_meta: freq_scale_train = 1
llm_load_print_meta: n_ctx_orig_yarn = 8192
llm_load_print_meta: rope_finetuned = unknown
llm_load_print_meta: ssm_d_conv = 0
llm_load_print_meta: ssm_d_inner = 0
llm_load_print_meta: ssm_d_state = 0
llm_load_print_meta: ssm_dt_rank = 0
llm_load_print_meta: model type = 2B
llm_load_print_meta: model ftype = Q8_0
llm_load_print_meta: model params = 2.61 B
llm_load_print_meta: model size = 2.59 GiB (8.50 BPW)
llm_load_print_meta: general.name = Gemma 2 2b
llm_load_print_meta: BOS token = 2 '<bos>'
llm_load_print_meta: EOS token = 1 '<eos>'
llm_load_print_meta: UNK token = 3 '<unk>'
llm_load_print_meta: PAD token = 0 '<pad>'
llm_load_print_meta: LF token = 227 '<0x0A>'
llm_load_print_meta: EOT token = 107 '<end_of_turn>'
llm_load_print_meta: max token length = 48
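
A quick sanity check on the hyperparameters printed above (a minimal sketch; all figures are taken from the log lines above):

# Sanity checks on the printed gemma-2-2b hyperparameters.
n_head, n_head_kv, n_embd_head_k = 8, 4, 256
n_params = 2.61e9                       # model params = 2.61 B
size_bytes = 2.59 * 1024**3             # model size = 2.59 GiB

print(n_head // n_head_kv)              # 2    -> n_gqa
print(n_head_kv * n_embd_head_k)        # 1024 -> n_embd_k_gqa / n_embd_v_gqa
print(round(size_bytes * 8 / n_params, 2))  # ~8.52 bits per weight, matching the reported 8.50 BPW within rounding
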
ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no
ggml_cuda_init: GGML_CUDA_FORCE_CUBLAS: no
ggml_cuda_init: found 1 CUDA devices:
Device 0: NVIDIA GeForce RTX 4090, compute capability 8.9, VMM: yes
llm_load_tensors: ggml ctx size = 0.26 MiB
llm_load_tensors: offloading 26 repeating layers to GPU
llm_load_tensors: offloading non-repeating layers to GPU
llm_load_tensors: offloaded 27/27 layers to GPU
llm_load_tensors: CPU buffer size = 597.66 MiB
llm_load_tensors: CUDA0 buffer size = 2649.78 MiB
..................................................................
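
The 597.66 MiB CPU buffer is consistent with the token-embedding table being kept on the host at Q8_0 (34 bytes per block of 32 weights); which tensor actually lands in that buffer is an assumption here, but the arithmetic matches exactly:

# 256000 x 2304 embedding weights at Q8_0: 34 bytes per 32-weight block (assumption).
n_vocab, n_embd = 256000, 2304
bytes_per_weight = 34 / 32
print(round(n_vocab * n_embd * bytes_per_weight / 1024**2, 2))  # 597.66 MiB
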
llama_new_context_with_model: n_ctx = 512
llama_new_context_with_model: n_batch = 512
llama_new_context_with_model: n_ubatch = 512
llama_new_context_with_model: flash_attn = 0
llama_new_context_with_model: freq_base = 10000.0
llama_new_context_with_model: freq_scale = 1
llama_kv_cache_init: CUDA0 KV buffer size = 52.00 MiB
llama_new_context_with_model: KV self size = 52.00 MiB, K (f16): 26.00 MiB, V (f16): 26.00 MiB
llama_new_context_with_model: CUDA_Host output buffer size = 0.98 MiB
llama_new_context_with_model: CUDA0 compute buffer size = 504.50 MiB
llama_new_context_with_model: CUDA_Host compute buffer size = 6.51 MiB
llama_new_context_with_model: graph nodes = 1050
llama_new_context_with_model: graph splits = 2
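
The KV cache size follows directly from the context parameters above (n_ctx = 512, n_layer = 26, n_embd_k_gqa = n_embd_v_gqa = 1024, f16 = 2 bytes per element):

# KV self size = n_ctx * n_layer * n_embd_gqa * 2 bytes, separately for K and V.
n_ctx, n_layer, n_embd_gqa = 512, 26, 1024
k_mib = n_ctx * n_layer * n_embd_gqa * 2 / 1024**2
print(k_mib, k_mib, 2 * k_mib)  # 26.0 MiB (K), 26.0 MiB (V), 52.0 MiB total
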

system_info: n_threads = 25 / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |
compute_imatrix: tokenizing the input ..
compute_imatrix: tokenization took 127.479 ms
compute_imatrix: computing over 128 chunks with batch_size 512
compute_imatrix: 0.47 seconds per pass - ETA 0.98 minutes
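
The ETA is simply the chunk count multiplied by the measured time per pass (the 0.47 s figure is already rounded, hence the small difference from 0.98 minutes):

chunks, seconds_per_pass = 128, 0.47
print(round(chunks * seconds_per_pass / 60, 2))  # 1.0 minute, vs. the reported 0.98
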
[1]5.7344,[2]4.2542,[3]4.0837,[4]5.0956,[5]5.3678,[6]4.6906,[7]5.1602,[8]5.3917,[9]5.5928,
save_imatrix: stored collected data after 10 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[10]5.0580,[11]5.2267,[12]5.6768,[13]6.2150,[14]6.4010,[15]6.8117,[16]7.1236,[17]7.2287,[18]7.5431,[19]7.2623,
save_imatrix: stored collected data after 20 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[20]7.3102,[21]7.4963,[22]7.4363,[23]7.5728,[24]7.7473,[25]7.9231,[26]7.6887,[27]7.9831,[28]8.2145,[29]8.2222,
save_imatrix: stored collected data after 30 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[30]8.2123,[31]7.7328,[32]7.5207,[33]7.4281,[34]7.3098,[35]7.2145,[36]7.2683,[37]7.3504,[38]7.3797,[39]7.5269,
save_imatrix: stored collected data after 40 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[40]7.6712,[41]7.8440,[42]8.1609,[43]8.4545,[44]8.7456,[45]8.9497,[46]8.8151,[47]8.8622,[48]9.0405,[49]9.1746,
save_imatrix: stored collected data after 50 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[50]8.9957,[51]8.9979,[52]9.0139,[53]9.1546,[54]9.3043,[55]9.4244,[56]9.4541,[57]9.4445,[58]9.4497,[59]9.2942,
save_imatrix: stored collected data after 60 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[60]9.2112,[61]9.0675,[62]9.0184,[63]9.0833,[64]9.0639,[65]9.0433,[66]9.0561,[67]9.0020,[68]8.9303,[69]8.9519,
save_imatrix: stored collected data after 70 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[70]8.9234,[71]8.9093,[72]8.9169,[73]8.8727,[74]8.8301,[75]8.7879,[76]8.7867,[77]8.7905,[78]8.7733,[79]8.7336,
save_imatrix: stored collected data after 80 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[80]8.7822,[81]8.8030,[82]8.7770,[83]8.7602,[84]8.7996,[85]8.6746,[86]8.6369,[87]8.5818,[88]8.5882,[89]8.6012,
save_imatrix: stored collected data after 90 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[90]8.6103,[91]8.5283,[92]8.4495,[93]8.3605,[94]8.2716,[95]8.2000,[96]8.1240,[97]8.0524,[98]7.9900,[99]8.0104,
save_imatrix: stored collected data after 100 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[100]8.0186,[101]8.1076,[102]8.1813,[103]8.2514,[104]8.3931,[105]8.4999,[106]8.5135,[107]8.5358,[108]8.5458,[109]8.5243,
save_imatrix: stored collected data after 110 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[110]8.5230,[111]8.4835,[112]8.4384,[113]8.4725,[114]8.4849,[115]8.4836,[116]8.4700,[117]8.5049,[118]8.5216,[119]8.5248,
save_imatrix: stored collected data after 120 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat
[120]8.5249,[121]8.5319,[122]8.4888,[123]8.5569,[124]8.6259,[125]8.6848,[126]8.7687,[127]8.8350,[128]8.8949,
save_imatrix: stored collected data after 128 chunks in gemma-2-2b-IMat-GGUF/imatrix.dat

llama_print_timings: load time = 1162.27 ms
llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: prompt eval time = 35926.81 ms / 65536 tokens ( 0.55 ms per token, 1824.15 tokens per second)
llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
llama_print_timings: total time = 38056.02 ms / 65537 tokens
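
The reported prompt-eval throughput can be reproduced from the token count and wall time:

tokens, prompt_eval_ms = 65536, 35926.81
print(round(tokens / (prompt_eval_ms / 1000), 2))  # 1824.15 tokens per second
print(round(prompt_eval_ms / tokens, 2))           # 0.55 ms per token
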

Final estimate: PPL = 8.8949 +/- 0.13669
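
The bracketed numbers above are running perplexity values over the chunks processed so far, and the last line is the final estimate with its standard error. A minimal sketch of how such an estimate could be derived from per-token log-likelihoods (illustrative only; token_logprobs is a hypothetical input, not the exact llama.cpp implementation):

import math

def ppl_with_error(token_logprobs):
    """Perplexity and its propagated standard error from natural-log token likelihoods."""
    n = len(token_logprobs)
    nll = [-lp for lp in token_logprobs]
    mean_nll = sum(nll) / n
    var = sum((x - mean_nll) ** 2 for x in nll) / (n - 1)
    sem = math.sqrt(var / n)                 # standard error of the mean NLL
    ppl = math.exp(mean_nll)
    return ppl, ppl * sem                    # error propagated through exp()

# Example: ppl, err = ppl_with_error(logprobs) -> reported as "PPL = <ppl> +/- <err>"
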