LongMountain committed
Commit 0be73e4 · 1 Parent(s): 82f6818

init commit
Files changed (4)
  1. README.md +13 -0
  2. attn_gate_weights.pth +3 -0
  3. config.json +35 -0
  4. trainer_state.json +0 -0
README.md CHANGED
@@ -1,3 +1,16 @@
 ---
 license: apache-2.0
+library_name: transformers
+base_model:
+- Qwen/QwQ-32B
+base_model_relation: "adapter"
 ---
+
+
+
+This repo contains the AttnGates' weights for the QwQ-32B model, introduced by SeerAttention.
+
+[SeerAttention](https://arxiv.org/pdf/2410.13276) introduces learnable AttnGate modules to accelerate the computationally intensive prefill stage of long-context large language models (LLMs) via dynamic block-level sparsity. The AttnGates are trained in a parameter-efficient self-distillation framework, where they learn to mimic the 2D max-pooled attention patterns of the original frozen model, preserving its integrity while avoiding costly retraining. During inference, these gates generate block-sparse binary masks by applying a threshold or TopK to their learned soft scores, enabling efficient computation through a custom block-sparse FlashAttention kernel.
+
+## Original GitHub Repo
+[https://github.com/microsoft/SeerAttention](https://github.com/microsoft/SeerAttention).
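The TopK step described in the README is easy to picture in isolation. Below is a minimal PyTorch sketch of turning block-level soft gate scores into a block-sparse binary mask; the names (`block_sparse_mask`, `gate_scores`, `topk_blocks`) are hypothetical illustrations of the general idea, not the actual SeerAttention kernel code.

```python
import torch

def block_sparse_mask(gate_scores: torch.Tensor, topk_blocks: int) -> torch.Tensor:
    """Binarize soft block-level gate scores with TopK.

    gate_scores: (num_q_blocks, num_kv_blocks) soft scores from a gate.
    topk_blocks: how many KV blocks each query block may attend to.
    Returns a boolean mask; True marks blocks kept for attention.
    """
    k = min(topk_blocks, gate_scores.size(-1))
    # Keep the k highest-scoring KV blocks per query block.
    topk_idx = gate_scores.topk(k, dim=-1).indices
    mask = torch.zeros_like(gate_scores, dtype=torch.bool)
    mask.scatter_(-1, topk_idx, True)
    return mask

# Toy example: 4 query blocks x 8 KV blocks, keep 2 per query block.
print(block_sparse_mask(torch.rand(4, 8), topk_blocks=2))
```

In the real pipeline such a mask would be consumed by the block-sparse FlashAttention kernel rather than used to mask a dense attention matrix.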
attn_gate_weights.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d63e9047219819ec64bded486dc2d2c5a51a4127004973c311eaeec276cc6363
+size 251668686
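`attn_gate_weights.pth` is stored via Git LFS; the pointer above resolves to a roughly 252 MB PyTorch checkpoint. Assuming it holds a plain `state_dict` of gate parameters (an assumption; the authoritative loading code lives in the SeerAttention repo), it can be inspected like this:

```python
import torch

# Assumption: the checkpoint is a flat dict of tensors (a state_dict);
# the authoritative loading logic is in the SeerAttention repo.
state = torch.load("attn_gate_weights.pth", map_location="cpu")

for name, tensor in state.items():
    print(name, tuple(tensor.shape), tensor.dtype)
```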
config.json ADDED
@@ -0,0 +1,35 @@
+{
+  "_attn_implementation_autoset": true,
+  "_name_or_path": "Qwen/QwQ-32B",
+  "architectures": [
+    "Qwen2ForCausalLM"
+  ],
+  "attention_dropout": 0.0,
+  "base_model": "Qwen/QwQ-32B",
+  "bos_token_id": 151643,
+  "eos_token_id": 151645,
+  "hidden_act": "silu",
+  "hidden_size": 5120,
+  "initializer_range": 0.02,
+  "intermediate_size": 27648,
+  "max_position_embeddings": 131072,
+  "max_window_layers": 64,
+  "model_type": "qwen2",
+  "num_attention_heads": 40,
+  "num_hidden_layers": 64,
+  "num_key_value_heads": 8,
+  "rms_norm_eps": 1e-05,
+  "rope_scaling": null,
+  "rope_theta": 1000000.0,
+  "seerattn_gate_block_size": 64,
+  "seerattn_gate_force_double": false,
+  "seerattn_gate_hidden_size": 128,
+  "seerattn_gate_type": "Qavg_Kmaxminavg",
+  "sliding_window": 32768,
+  "tie_word_embeddings": false,
+  "torch_dtype": "bfloat16",
+  "transformers_version": "4.49.0",
+  "use_cache": true,
+  "use_sliding_window": false,
+  "vocab_size": 152064
+}
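The `seerattn_*` entries extend the stock Qwen2 config with the gate hyper-parameters: 64-token blocks, a 128-dim gate hidden size, and a `Qavg_Kmaxminavg` pooling scheme. A small sketch of reading them with the standard library (no SeerAttention code required):

```python
import json

with open("config.json") as f:
    cfg = json.load(f)

block_size = cfg["seerattn_gate_block_size"]  # 64 tokens per attention block
context = cfg["max_position_embeddings"]      # 131072-token context window
print("gate type:", cfg["seerattn_gate_type"])
print("KV blocks at full context:", context // block_size)  # 131072 / 64 = 2048
```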
trainer_state.json ADDED
The diff for this file is too large to render.