diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/formatted_tensors.json b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/formatted_tensors.json new file mode 100644 index 0000000000000000000000000000000000000000..e0547993e0dbc7d2f25432ae47d82f5ce70e505f --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/formatted_tensors.json @@ -0,0 +1,733 @@ +{ + "meta-llama_Llama-2-7b-hf.model.embed_tokens.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32000, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.embed_tokens.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 4096, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 4096, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy" + }, + 
"meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 4096, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 128 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 128 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 128 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 128 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32, + 1, + 128 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -2, + "shape": [ + 32, + 128, + 1 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy" + }, + 
"meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 32, + 1, + 1 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 1 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 32, + 1, + 1 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32, + 1, + 1 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -2, + "shape": [ + 32, + 1, + 128 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 32, + 1, + 128 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 4096, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy" + }, + 
"meta-llama_Llama-2-7b-hf.model.layers.0.add1.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.add1.other": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.add1.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 11008, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 11008, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 
1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 11008 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 11008 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 4096, + 11008 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.add2.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.add2.other": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy" + }, + "meta-llama_Llama-2-7b-hf.model.layers.0.add2.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 
+ ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy" + }, + "meta-llama_Llama-2-7b-hf.model.norm.input": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.norm.input.npy" + }, + "meta-llama_Llama-2-7b-hf.model.norm.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 4096 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.norm.output.npy" + }, + "meta-llama_Llama-2-7b-hf.lm_head.input": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 1, + 1, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.input.npy" + }, + "meta-llama_Llama-2-7b-hf.lm_head.weight": { + "tensor_meta": { + "is_emulated": true, + "dtype": "emulated_mxint8", + "block_size": 32, + "block_axis": -1, + "shape": [ + 32000, + 4096 + ] + }, + "exp_mantissa": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.weight.npy" + }, + "meta-llama_Llama-2-7b-hf.lm_head.output": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.bfloat16", + "shape": [ + 1, + 1, + 32000 + ] + }, + "hex": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.output.npy" + }, + "input_ids": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.int64", + "shape": [ + 1, + 1 + ] + }, + "int": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/input_ids.npy" + }, + "position_ids": { + "tensor_meta": { + "is_emulated": false, + "dtype": "torch.int64", + "shape": [ + 1, + 1 + ] + }, + "int": "saved_tensors/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/position_ids.npy" + } +} \ No newline at end of file diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/input_ids.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/input_ids.npy new file mode 100644 index 0000000000000000000000000000000000000000..6593c1ffa3b9085aace5bfe3b258f2915b04c94d --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/input_ids.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1eb47bb5e8322411424c1d3e4312e472aeb5cf21135a0650a14111ad8ac8b287 +size 136 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..df9d29579609ee8b5f65f7f5a7e674b72d437b3f --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fef09aca4d64e1cbf4dc77219595c05a79b449c4764469f46947c12e9c09cfc +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..ae84108825675dc6df4321bcf40cadd114fe62f3 --- /dev/null +++ 
b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc369fa525501149827b8de254156fea89bd89fa2db15adc769d6ebe77f618af +size 512128 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..697208ef82b392a78a4b0dc8b7bf25eaba641ea9 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.lm_head.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:562ba61b2aff15fbc3f097a75441dcdfa2c81c4e549df7cf8c132843e1e09dbd +size 2097152128 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..74e23fac51f5dba00e33129abf4b2c7b6b3e0116 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98c01f35cf299b5c3bad268f643f2dee32d1126712b435d4a2e38febdb63c841 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..9510410f0bfbb128f94cd489c8a8c4855c960024 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.embed_tokens.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d1388975e155edbc05fe1c28d0912655aa6265106f5192ef0810a3498c22e979 +size 2097152128 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..74e23fac51f5dba00e33129abf4b2c7b6b3e0116 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98c01f35cf299b5c3bad268f643f2dee32d1126712b435d4a2e38febdb63c841 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..07cac02aed478a1adc29a26c9fcff1ddcb68d4f4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50168efd64364b7850abbfa5e76dd0f8fd33f80b441e1e67761d7249b875640c +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy new file mode 100644 index 
0000000000000000000000000000000000000000..fb3a6c07e2a8091746a98bc0708730a1e439d782 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add1.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7d32e411316624c0ed2f7b8b3f3903d77f2ec4df90a122992967e28ed5443bb +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..fb3a6c07e2a8091746a98bc0708730a1e439d782 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7d32e411316624c0ed2f7b8b3f3903d77f2ec4df90a122992967e28ed5443bb +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..74eb2cd25d90738e343915a634eb0d511552073b --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f30c21843e6f29d3c5dd1df4d9c4b86946ec81e6a98869744b888f0b983eba83 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..8e550325dc79bb16492af2f141f5626163825a4b --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.add2.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5e188b25a4891476f2f2d61ddebe79eddb6c725eb56bc5575094b9098fa61eda +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..74e23fac51f5dba00e33129abf4b2c7b6b3e0116 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:98c01f35cf299b5c3bad268f643f2dee32d1126712b435d4a2e38febdb63c841 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..abed340dc60d2f6b9523521d3e1b8c0fdcecf6df --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.input_layernorm.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0eadb0ba19cff507cc1b50fa0adb3057a75be05df95f104c57f5e2824d8a7915 +size 65664 diff --git 
a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..198358090f01248c64cb80b8dd05269daacf9203 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a45cb4954612c5ba9b9eac55b717a60fa0d2608e8b5464db0ca98c45e51d1a04 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..13c22b8bb2362521730cf318e39bce6d707bb925 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.act.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfd9c0f8ff57ad5ed26a9b7212e11c436848e6f6e803ae4210ff1b358d0afa72 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..ecca79627cc6252155ec4443834a335e8b262597 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eab83152546ebdbd5902900867f768fa2bcd23eba17698c54a943d75d3c5a35c +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..74eb2cd25d90738e343915a634eb0d511552073b --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f30c21843e6f29d3c5dd1df4d9c4b86946ec81e6a98869744b888f0b983eba83 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..8eae46c783a893e41f8ec035bb193ca5f6072a24 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.down_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:593afa2fc524c28e335c4db7df000ed9e94ea9940cb87a146a8dfb71cbf94fc0 +size 721420416 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..9dcb6949bde20d3bcc758f378b5fd56fdbbac790 --- /dev/null +++ 
b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d04504eeff2f40c9e65b5cf9337f1548d24c1c15c34767038209a8dfda597c8 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..198358090f01248c64cb80b8dd05269daacf9203 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a45cb4954612c5ba9b9eac55b717a60fa0d2608e8b5464db0ca98c45e51d1a04 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..37235b93ec2dd2536957717e71e2b95aa6f1f36a --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.gate_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1520c00f2cee83a6a96805a1fdc51fb9483db5516a1b1b9a673f46329aeba7d0 +size 721420416 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..13c22b8bb2362521730cf318e39bce6d707bb925 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dfd9c0f8ff57ad5ed26a9b7212e11c436848e6f6e803ae4210ff1b358d0afa72 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..0cf2db6c5103b6c931825ea0ec6540cf711c121c --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50dd427a7864e6ebe7531b43e1df1c221877b50f025296e3b36b1b3be2a5dc13 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..ce5a77fbac26ade473ec64d56ad9055a93effaf9 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.mult.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49ce5456ec01ed652d4e287e6df237690d70ad60abe005528846d39ef6cd9253 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy 
b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..9dcb6949bde20d3bcc758f378b5fd56fdbbac790 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d04504eeff2f40c9e65b5cf9337f1548d24c1c15c34767038209a8dfda597c8 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..0cf2db6c5103b6c931825ea0ec6540cf711c121c --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50dd427a7864e6ebe7531b43e1df1c221877b50f025296e3b36b1b3be2a5dc13 +size 176256 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..d0e30da95a96c537bccc1074050fd77d5e1fef7d --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.mlp.up_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:720727aaef967c1520e631818799f40f87992c588f21b10c0c76b1f94add94e1 +size 721420416 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..fb3a6c07e2a8091746a98bc0708730a1e439d782 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7d32e411316624c0ed2f7b8b3f3903d77f2ec4df90a122992967e28ed5443bb +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..6dc41dd569ab20d7e99234887a53732e0b3c7e09 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.post_attention_layernorm.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d84d4cf16d800314099f59c72c3dbc8bd50545e12ff918d73a9103fe18502263 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..90c522f6f2c6ef35d47a26cea5b18eb38c5c08eb --- /dev/null +++ 
b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f91f84a998faf60febf5cc370068cc25087dd28529454af6895effc80f749cdf +size 640 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..6ecd0ceb544917fd9833dd5b5976ef474e1b94e8 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc7b5e0415522575497fcd7816ce22b9cc01ce828196f9d8abe3c2022d106b1f +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..dd653be8eac24546e22f7f27ba1fd40e45f97365 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_output.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d088e565b13f1e598b9812bd92af28d4126cf5d9f248e49c4ceffc55233256a +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..72bfe5eb02f8e40f1caee1649af64bbbdda3c069 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1b36652036c7c1122cc40bff58a40108586176e7a28ece717756559c52ca2e6 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy new file mode 100644 index 0000000000000000000000000000000000000000..3b584b6a757eabbec19b08e8d7ef6e7409b1b9a4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.other.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61eb9ada5ac316af7282ebdb5baa7e0f6743dfb5cf23bc5cf07fd4a4568ff96f +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..7b88a16e256555d60d1866204650483fcb42a79a --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:73df251ddb1b55009fc9df39a6967fe4226183a77c8ce21029ca718c9597567b +size 640 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy new file mode 100644 index 0000000000000000000000000000000000000000..218cb08cf997ed7b17ba242da76927fd5309e65d --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_masked.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06587b4c7ea681540fa693ac7daca7c426e936962a62b20a9f007968c9aba7a0 +size 640 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy new file mode 100644 index 0000000000000000000000000000000000000000..bae379e173c543699aee73afd50597bcb3faf549 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.attn_weights_softmaxed.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:17cc502b3f0d8fa3f52d7c3292055a318f473fc2116c299449a3d8175e08b212 +size 640 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..3c9a8f64fadacb16a331843a2b42fff90658a3e5 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad9628fee7a934a08186ab804af49e013372cf3c3a22f83ce5b11d578409d240 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..adcd9c675c5654a88eeec33b8618e244c6c91f23 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:efc76cb674ffa88850c84c791dd858b2deb036319e63d27cf3a5249e604eb01d +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..4193e54c801f32302aff3702b69aa03a0bf7b3d7 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.k_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fdaded8169a538789d48e58777e61669d762b0f2c5d583def7210a3028e8c1a8 +size 268435584 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy 
b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..2958e4af0bbba511e7d7667cf61eb610530922e2 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f2fbbefa0d551308808405c34178156e5cbf78fb7a230a5f3ca96a21693dbfbb +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..07cac02aed478a1adc29a26c9fcff1ddcb68d4f4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50168efd64364b7850abbfa5e76dd0f8fd33f80b441e1e67761d7249b875640c +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..fe7f0de629046fea8cd61ca105258ba1f22b6ea4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.o_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:10e92abe6811d82d6e08a3821ace895ec8796101cb35d4c0c2342428113995bc +size 268435584 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..07cac02aed478a1adc29a26c9fcff1ddcb68d4f4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:50168efd64364b7850abbfa5e76dd0f8fd33f80b441e1e67761d7249b875640c +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..3c9a8f64fadacb16a331843a2b42fff90658a3e5 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad9628fee7a934a08186ab804af49e013372cf3c3a22f83ce5b11d578409d240 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..cade7fa97e0b7edec152f60e34c4f835c7e1c35a --- /dev/null +++ 
b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94348e987f572a574ef468f678316072543a214538e166381efc615caf2ccb94 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..1c5cedfb798ca594bfcb3115a49c820c77306ab4 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.q_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:caf5788f94b619f9edf12d274f0330f0a1500066da663ba6d3423d1cd28fabe9 +size 268435584 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy new file mode 100644 index 0000000000000000000000000000000000000000..c58e9ca3966c385461c89a37bf4457e224b4e8c5 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.cos.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ebbd8f3476ca6bc4fdb19a4f9666c225e1a81e13bf0817e834fdcf052b8deac +size 2176 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states.npy new file mode 100644 index 0000000000000000000000000000000000000000..6e21cf1ed6cadf1cae88bf3ebee70d6a8fe6e1be --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.key_states.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9d6aff39f9944434919e6a8015b6d627490221b00189f99e2ab182cb32751ab0 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states.npy new file mode 100644 index 0000000000000000000000000000000000000000..8119003e2892f2fa0b6c1a64b3d6b0f2840098ac --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.query_states.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0b73f4bb5a3cabffa0572ec0a9a122d41b5c31fd58c35d372b1b0d7133ea588 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin.npy new file mode 100644 index 0000000000000000000000000000000000000000..cd3ee152df8d523dff2b90f07b4ab1161ba415f7 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.rope.sin.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:99e564b613bab09285dcfc1f321f81072871ec2c377bb6b0e1fdb7aa800c7922 +size 2176 diff --git 
a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..3c9a8f64fadacb16a331843a2b42fff90658a3e5 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad9628fee7a934a08186ab804af49e013372cf3c3a22f83ce5b11d578409d240 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..ce8dd16efa080f516b355c83cf8ea6ecff028842 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2663a5a26c19efaf6eac13573ff6f7d96a7895c79a89c6a6336d8d0343c9ed95 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight.npy new file mode 100644 index 0000000000000000000000000000000000000000..079c1a3f0b9838e07d23b9724d56c9ec6ad718f9 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.layers.0.self_attn.v_proj.weight.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a8d269ff3848bbf9cb0f7229ab080a8edaf73617fd490b612a67b7aaaf4d2bb +size 268435584 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.norm.input.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.norm.input.npy new file mode 100644 index 0000000000000000000000000000000000000000..00ce6054fd08d903750985b1045df5bf3fc3ba89 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.norm.input.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:db800b28786309d6ee30b6e7edd9976694d4e562b2f175c2c439ef0acc1dc232 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.norm.output.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.norm.output.npy new file mode 100644 index 0000000000000000000000000000000000000000..71a7ab1010c63cd091d6d1d4bb7e724f82451472 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/meta-llama_Llama-2-7b-hf.model.norm.output.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12293daca489e22a19ef8ada6f03121ff9bd279131e05a187c7be7d762bf1728 +size 65664 diff --git a/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/position_ids.npy b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/position_ids.npy new file mode 100644 index 0000000000000000000000000000000000000000..121a65ba1324b75c295b3ee5c78d1822ab469442 --- /dev/null +++ b/meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/position_ids.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:dae8291481dcbf7038f1cba4f8c748f365cc5fb28cee29a612c9d22be237dfe9 +size 136
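
For anyone consuming this dump: each entry in formatted_tensors.json pairs a tensor_meta block (dtype, shape, and, for emulated_mxint8 tensors, block_size and block_axis) with a payload key — "exp_mantissa" for emulated tensors, "hex" for bfloat16 values, "int" for integer ids — whose value is the path of an .npy file. The sketch below is a minimal, hypothetical reader under two stated assumptions: that the "saved_tensors/" prefix in manifest paths maps onto this repository's layout (the .npy files here sit beside the manifest), and that emulated_mxint8 follows the OCP Microscaling MXINT8 rule (one shared power-of-two scale per block of 32 elements along block_axis, int8 mantissas with an implied 2^-6 scaling). The on-disk element encoding of the .npy payloads is not specified by this commit, so inspect_manifest only reports raw dtypes and shapes, and dequantize_mxint8 takes already-separated mantissa and exponent arrays rather than guessing the file layout.

import json
from pathlib import Path

import numpy as np

# Hypothetical location; run from the repository root.
MANIFEST = Path("meta-llama-Llama-2-7b-hf-mxint8/token-1024-pos-1/formatted_tensors.json")


def inspect_manifest(manifest_path: Path) -> None:
    """Print each entry's tensor_meta next to the raw dtype/shape of its .npy payload."""
    entries = json.loads(manifest_path.read_text())
    for name, entry in entries.items():
        meta = entry["tensor_meta"]
        payload_key = next(k for k in ("exp_mantissa", "hex", "int") if k in entry)
        # Assumption: stripping the manifest's "saved_tensors/" prefix yields a
        # path that is valid relative to this repository's root.
        rel = Path(entry[payload_key]).relative_to("saved_tensors")
        arr = np.load(rel, allow_pickle=False)
        print(f"{name}: meta={meta['dtype']} {meta['shape']} | "
              f"{payload_key} file: dtype={arr.dtype} shape={arr.shape}")


def dequantize_mxint8(mantissa: np.ndarray, shared_exp: np.ndarray,
                      block_size: int = 32, block_axis: int = -1) -> np.ndarray:
    """Assumed OCP MXINT8 decode: x = 2**shared_exp * (mantissa / 64), blockwise.

    mantissa has the full tensor shape; shared_exp holds one integer exponent per
    block of `block_size` elements along `block_axis`. Assumes the blocked axis
    length is a multiple of block_size; short axes (e.g. the size-1 block_axis of
    attn_output.other above) would need padding to a partial block first.
    """
    m = np.moveaxis(mantissa.astype(np.float32), block_axis, -1)
    full = m.shape
    m = m.reshape(*full[:-1], full[-1] // block_size, block_size)
    scale = np.exp2(shared_exp.astype(np.float32))[..., np.newaxis]  # one scale per block
    out = (m * scale / 64.0).reshape(full)
    return np.moveaxis(out, -1, block_axis)


if __name__ == "__main__":
    inspect_manifest(MANIFEST)

One pattern worth noting in the manifest itself: block_axis consistently points along each matmul's contraction dimension — -1 for weights and left-hand operands, -2 for the transposed right-hand operands of attn_weights and attn_output — which is what lets a blockwise dot product consume one shared scale per 32 reduced elements.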