HachiML committed
Commit 2e093cc
Parent: 37193ad

Upload modeling_bit_llama.py

Files changed (1)
  1. modeling_bit_llama.py +77 -0
modeling_bit_llama.py ADDED
@@ -0,0 +1,77 @@
from typing import Optional

from torch import nn
from transformers.models.llama.modeling_llama import (
    LlamaConfig,
    LlamaModel,
    LlamaForCausalLM,
    LlamaAttention,
    LlamaFlashAttention2,
    LlamaSdpaAttention,
    LlamaMLP,
    LlamaDecoderLayer,
)

# BitLinear: quantized linear layer from the author's mybitnet package.
from mybitnet.bitnet import BitLinear


class BitLlamaConfig(LlamaConfig):
    model_type = "bit_llama"

    def __init__(self, bits=8, **kwargs):
        super().__init__(**kwargs)
        self.bits = bits


class BitLlamaMLP(LlamaMLP):
    def __init__(self, config):
        super().__init__(config)
        # Swap the dense MLP projections for quantized BitLinear layers.
        self.gate_proj = BitLinear(self.hidden_size, self.intermediate_size, bias=False, bits=config.bits, flg_before_linear=True)
        self.up_proj = BitLinear(self.hidden_size, self.intermediate_size, bias=False, bits=config.bits, flg_before_linear=True)
        self.down_proj = BitLinear(self.intermediate_size, self.hidden_size, bias=False, bits=config.bits, flg_before_linear=False)


class BitLlamaAttention(LlamaAttention):
    def __init__(self, config: BitLlamaConfig, layer_idx: Optional[int] = None):
        # layer_idx is forwarded so the base class can index the KV cache.
        super().__init__(config, layer_idx)
        # Swap the attention projections for quantized BitLinear layers.
        self.q_proj = BitLinear(self.hidden_size, self.num_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
        self.k_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
        self.v_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
        self.o_proj = BitLinear(self.hidden_size, self.hidden_size, bias=False, bits=config.bits, flg_before_linear=True)


class BitLlamaFlashAttention2(LlamaFlashAttention2):
    def __init__(self, config: BitLlamaConfig, layer_idx: Optional[int] = None):
        super().__init__(config, layer_idx)
        self.q_proj = BitLinear(self.hidden_size, self.num_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
        self.k_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
        self.v_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
        self.o_proj = BitLinear(self.hidden_size, self.hidden_size, bias=False, bits=config.bits, flg_before_linear=True)


class BitLlamaSdpaAttention(LlamaSdpaAttention):
    def __init__(self, config: BitLlamaConfig, layer_idx: Optional[int] = None):
        super().__init__(config, layer_idx)
        self.q_proj = BitLinear(self.hidden_size, self.num_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
        self.k_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
        self.v_proj = BitLinear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False, bits=config.bits, flg_before_linear=True)
        self.o_proj = BitLinear(self.hidden_size, self.hidden_size, bias=False, bits=config.bits, flg_before_linear=True)


BITLLAMA_ATTENTION_CLASSES = {
    "eager": BitLlamaAttention,
    "flash_attention_2": BitLlamaFlashAttention2,
    "sdpa": BitLlamaSdpaAttention,
}


class BitLlamaDecoderLayer(LlamaDecoderLayer):
    def __init__(self, config: BitLlamaConfig, layer_idx: int):
        super().__init__(config, layer_idx)
        # Use the BitLinear-based attention and MLP variants defined above.
        self.self_attn = BITLLAMA_ATTENTION_CLASSES[config._attn_implementation](config=config, layer_idx=layer_idx)
        self.mlp = BitLlamaMLP(config)


class BitLlamaModel(LlamaModel):
    def __init__(self, config: BitLlamaConfig):
        super().__init__(config)
        self.layers = nn.ModuleList(
            [BitLlamaDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
        )


class BitLlamaForCausalLM(LlamaForCausalLM):
    def __init__(self, config: BitLlamaConfig):
        super().__init__(config)
        self.model = BitLlamaModel(config)
        self.lm_head = BitLinear(config.hidden_size, config.vocab_size, bias=False, bits=config.bits, flg_before_linear=True)
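For reference, a minimal sketch of how these classes might be exercised locally. It assumes transformers and the author's mybitnet package are installed and that this file is importable as modeling_bit_llama; the config sizes below are illustrative placeholders, not the checkpoint's actual hyperparameters.

from transformers import AutoConfig, AutoModelForCausalLM
from modeling_bit_llama import BitLlamaConfig, BitLlamaForCausalLM

# Optionally register the custom classes with the auto factories,
# so AutoModelForCausalLM can resolve model_type "bit_llama".
AutoConfig.register("bit_llama", BitLlamaConfig)
AutoModelForCausalLM.register(BitLlamaConfig, BitLlamaForCausalLM)

# Build a tiny model directly from a config (all sizes are made up for illustration).
config = BitLlamaConfig(
    bits=8,
    hidden_size=256,
    intermediate_size=688,
    num_hidden_layers=2,
    num_attention_heads=4,
    num_key_value_heads=4,
    vocab_size=32000,
)
model = BitLlamaForCausalLM(config)
print(model)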