Cheng98 committed
Commit c864d0c · verified · 1 Parent(s): 42cb496

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/formatted_tensors.json +733 -0
  2. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/input_ids.npy +3 -0
  3. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.input.npy +3 -0
  4. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.output.npy +3 -0
  5. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.weight.npy +3 -0
  6. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy +3 -0
  7. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy +3 -0
  8. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy +3 -0
  9. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy +3 -0
  10. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy +3 -0
  11. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy +3 -0
  12. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy +3 -0
  13. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy +3 -0
  14. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy +3 -0
  15. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy +3 -0
  16. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy +3 -0
  17. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy +3 -0
  18. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy +3 -0
  19. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy +3 -0
  20. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy +3 -0
  21. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy +3 -0
  22. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy +3 -0
  23. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy +3 -0
  24. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy +3 -0
  25. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy +3 -0
  26. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy +3 -0
  27. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy +3 -0
  28. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy +3 -0
  29. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy +3 -0
  30. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy +3 -0
  31. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy +3 -0
  32. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy +3 -0
  33. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy +3 -0
  34. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy +3 -0
  35. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy +3 -0
  36. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy +3 -0
  37. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy +3 -0
  38. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy +3 -0
  39. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy +3 -0
  40. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy +3 -0
  41. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy +3 -0
  42. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy +3 -0
  43. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy +3 -0
  44. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy +3 -0
  45. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy +3 -0
  46. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy +3 -0
  47. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy +3 -0
  48. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy +3 -0
  49. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy +3 -0
  50. meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy +3 -0
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/formatted_tensors.json ADDED
@@ -0,0 +1,733 @@
+ {
+     "meta-llama_Llama-2-7b-hf.model.embed_tokens.weight": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 32000,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.embed_tokens.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 4096,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 4096,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 4096,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 128
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 128
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 32,
+                 1,
+                 128
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 32,
+                 1,
+                 128
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 32,
+                 1,
+                 128
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -2,
+             "shape": [
+                 32,
+                 128,
+                 1
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 32,
+                 1,
+                 1
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 32,
+                 1,
+                 1
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 32,
+                 1,
+                 1
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 32,
+                 1,
+                 1
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -2,
+             "shape": [
+                 32,
+                 1,
+                 128
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 32,
+                 1,
+                 128
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 4096,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.add1.input": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.add1.other": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.add1.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 11008,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 11008
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 11008,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 11008
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 11008
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 11008
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 11008
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 11008
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 11008
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 1,
+                 1,
+                 11008
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 4096,
+                 11008
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.add2.input": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.add2.other": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.layers.0.add2.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.norm.input": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.norm.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.model.norm.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.norm.output.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.lm_head.input": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 1,
+                 1,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.input.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.lm_head.weight": {
+         "tensor_meta": {
+             "is_emulated": true,
+             "dtype": "emulated_mxint8",
+             "block_size": 32,
+             "block_axis": -1,
+             "shape": [
+                 32000,
+                 4096
+             ]
+         },
+         "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.weight.npy"
+     },
+     "meta-llama_Llama-2-7b-hf.lm_head.output": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.bfloat16",
+             "shape": [
+                 1,
+                 1,
+                 32000
+             ]
+         },
+         "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.output.npy"
+     },
+     "input_ids": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.int64",
+             "shape": [
+                 1,
+                 1
+             ]
+         },
+         "int": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/input_ids.npy"
+     },
+     "position_ids": {
+         "tensor_meta": {
+             "is_emulated": false,
+             "dtype": "torch.int64",
+             "shape": [
+                 1,
+                 1
+             ]
+         },
+         "int": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/position_ids.npy"
+     }
+ }
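
For orientation: formatted_tensors.json is a flat manifest. Each key names a dumped tensor, `tensor_meta` records its dtype and shape (plus `block_size`/`block_axis` for the emulated mxint8 entries), and a second field (`exp_mantissa`, `hex`, or `int`) points at the backing `.npy` file under `saved_tensors/`. Below is a minimal loader sketch. The hex decode (4 hex digits per bfloat16 bit pattern) is an inference from the metadata, not a documented format; it is consistent with the LFS blob sizes of 16 bytes per element plus a 128-byte `.npy` header (e.g. 1×1×4096 → 4096×16+128 = 65664 bytes).

```python
import json
import numpy as np

# Sketch: walk formatted_tensors.json, load each referenced .npy, and check
# it against the recorded metadata. Assumes the LFS blobs are materialized
# and the manifest paths resolve from the current working directory.
with open("formatted_tensors.json") as f:
    manifest = json.load(f)

def decode_bf16_hex(arr):
    # Assumption: each bfloat16 value is dumped as a 4-hex-digit string of
    # its raw bit pattern; shifting into the high 16 bits of a uint32 gives
    # the numerically equal float32 (e.g. "3f80" -> 0x3F800000 -> 1.0).
    bits = np.vectorize(lambda s: int(s, 16))(arr)
    return (bits.astype(np.uint32) << np.uint32(16)).view(np.float32)

for name, entry in manifest.items():
    meta = entry["tensor_meta"]
    path = entry.get("hex") or entry.get("exp_mantissa") or entry.get("int")
    raw = np.load(path, allow_pickle=False)
    if not meta["is_emulated"]:
        # Non-emulated dumps should match the recorded shape exactly; the
        # on-disk layout of the emulated exp/mantissa pairs is not
        # documented in this commit, so those are left raw.
        assert list(raw.shape) == meta["shape"], (name, raw.shape)
    values = decode_bf16_hex(raw) if "hex" in entry else raw
    print(name, meta["dtype"], meta["shape"])
```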
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/input_ids.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1eb47bb5e8322411424c1d3e4312e472aeb5cf21135a0650a14111ad8ac8b287
+ size 136
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fef09aca4d64e1cbf4dc77219595c05a79b449c4764469f46947c12e9c09cfc
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc369fa525501149827b8de254156fea89bd89fa2db15adc769d6ebe77f618af
+ size 512128
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.weight.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:562ba61b2aff15fbc3f097a75441dcdfa2c81c4e549df7cf8c132843e1e09dbd
+ size 2097152128
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98c01f35cf299b5c3bad268f643f2dee32d1126712b435d4a2e38febdb63c841
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1388975e155edbc05fe1c28d0912655aa6265106f5192ef0810a3498c22e979
+ size 2097152128
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98c01f35cf299b5c3bad268f643f2dee32d1126712b435d4a2e38febdb63c841
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50168efd64364b7850abbfa5e76dd0f8fd33f80b441e1e67761d7249b875640c
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7d32e411316624c0ed2f7b8b3f3903d77f2ec4df90a122992967e28ed5443bb
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7d32e411316624c0ed2f7b8b3f3903d77f2ec4df90a122992967e28ed5443bb
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f30c21843e6f29d3c5dd1df4d9c4b86946ec81e6a98869744b888f0b983eba83
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5e188b25a4891476f2f2d61ddebe79eddb6c725eb56bc5575094b9098fa61eda
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98c01f35cf299b5c3bad268f643f2dee32d1126712b435d4a2e38febdb63c841
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0eadb0ba19cff507cc1b50fa0adb3057a75be05df95f104c57f5e2824d8a7915
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a45cb4954612c5ba9b9eac55b717a60fa0d2608e8b5464db0ca98c45e51d1a04
+ size 176256
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfd9c0f8ff57ad5ed26a9b7212e11c436848e6f6e803ae4210ff1b358d0afa72
+ size 176256
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eab83152546ebdbd5902900867f768fa2bcd23eba17698c54a943d75d3c5a35c
+ size 176256
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f30c21843e6f29d3c5dd1df4d9c4b86946ec81e6a98869744b888f0b983eba83
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:593afa2fc524c28e335c4db7df000ed9e94ea9940cb87a146a8dfb71cbf94fc0
+ size 721420416
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d04504eeff2f40c9e65b5cf9337f1548d24c1c15c34767038209a8dfda597c8
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a45cb4954612c5ba9b9eac55b717a60fa0d2608e8b5464db0ca98c45e51d1a04
+ size 176256
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1520c00f2cee83a6a96805a1fdc51fb9483db5516a1b1b9a673f46329aeba7d0
+ size 721420416
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfd9c0f8ff57ad5ed26a9b7212e11c436848e6f6e803ae4210ff1b358d0afa72
+ size 176256
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50dd427a7864e6ebe7531b43e1df1c221877b50f025296e3b36b1b3be2a5dc13
+ size 176256
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:49ce5456ec01ed652d4e287e6df237690d70ad60abe005528846d39ef6cd9253
+ size 176256
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d04504eeff2f40c9e65b5cf9337f1548d24c1c15c34767038209a8dfda597c8
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50dd427a7864e6ebe7531b43e1df1c221877b50f025296e3b36b1b3be2a5dc13
+ size 176256
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:720727aaef967c1520e631818799f40f87992c588f21b10c0c76b1f94add94e1
+ size 721420416
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7d32e411316624c0ed2f7b8b3f3903d77f2ec4df90a122992967e28ed5443bb
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d84d4cf16d800314099f59c72c3dbc8bd50545e12ff918d73a9103fe18502263
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f91f84a998faf60febf5cc370068cc25087dd28529454af6895effc80f749cdf
+ size 640
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc7b5e0415522575497fcd7816ce22b9cc01ce828196f9d8abe3c2022d106b1f
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d088e565b13f1e598b9812bd92af28d4126cf5d9f248e49c4ceffc55233256a
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1b36652036c7c1122cc40bff58a40108586176e7a28ece717756559c52ca2e6
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:61eb9ada5ac316af7282ebdb5baa7e0f6743dfb5cf23bc5cf07fd4a4568ff96f
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:73df251ddb1b55009fc9df39a6967fe4226183a77c8ce21029ca718c9597567b
+ size 640
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:06587b4c7ea681540fa693ac7daca7c426e936962a62b20a9f007968c9aba7a0
+ size 640
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17cc502b3f0d8fa3f52d7c3292055a318f473fc2116c299449a3d8175e08b212
+ size 640
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad9628fee7a934a08186ab804af49e013372cf3c3a22f83ce5b11d578409d240
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efc76cb674ffa88850c84c791dd858b2deb036319e63d27cf3a5249e604eb01d
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fdaded8169a538789d48e58777e61669d762b0f2c5d583def7210a3028e8c1a8
+ size 268435584
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2fbbefa0d551308808405c34178156e5cbf78fb7a230a5f3ca96a21693dbfbb
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50168efd64364b7850abbfa5e76dd0f8fd33f80b441e1e67761d7249b875640c
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10e92abe6811d82d6e08a3821ace895ec8796101cb35d4c0c2342428113995bc
+ size 268435584
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:50168efd64364b7850abbfa5e76dd0f8fd33f80b441e1e67761d7249b875640c
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ad9628fee7a934a08186ab804af49e013372cf3c3a22f83ce5b11d578409d240
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94348e987f572a574ef468f678316072543a214538e166381efc615caf2ccb94
+ size 65664
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:caf5788f94b619f9edf12d274f0330f0a1500066da663ba6d3423d1cd28fabe9
+ size 268435584
meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ebbd8f3476ca6bc4fdb19a4f9666c225e1a81e13bf0817e834fdcf052b8deac
+ size 2176
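
Each `ADDED` `.npy` entry above is a Git LFS pointer stub rather than the array itself: three lines giving the spec version, the SHA-256 of the actual blob, and its size in bytes. A small sketch for parsing a pointer and verifying a locally downloaded blob against it (the paths in the usage comment are hypothetical):

```python
import hashlib
from pathlib import Path

def parse_lfs_pointer(text: str) -> dict:
    # Pointer format per https://git-lfs.github.com/spec/v1:
    # "version <url>", "oid sha256:<hex>", "size <bytes>".
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "oid": fields["oid"].removeprefix("sha256:"),
        "size": int(fields["size"]),
    }

def verify_blob(pointer_path: str, blob_path: str) -> bool:
    # A blob is intact if both its length and SHA-256 match the pointer.
    ptr = parse_lfs_pointer(Path(pointer_path).read_text())
    blob = Path(blob_path).read_bytes()
    return (
        len(blob) == ptr["size"]
        and hashlib.sha256(blob).hexdigest() == ptr["oid"]
    )

# Hypothetical usage against the input_ids pointer shown above:
# verify_blob("input_ids.npy", "downloads/input_ids.npy")  # True if intact
```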